changeset 18158:974119c4df2b

merge with stable
author Mads Kiilerich <madski@unity3d.com>
date Fri, 28 Dec 2012 14:13:06 +0100
parents 0e68a3c11295 (diff) 52581d2b98ac (current diff)
children 8019f96ec4ce
diffstat 132 files changed, 3960 insertions(+), 1655 deletions(-)
--- a/Makefile	Fri Dec 28 14:10:35 2012 +0100
+++ b/Makefile	Fri Dec 28 14:13:06 2012 +0100
@@ -11,6 +11,9 @@
 PYFILES:=$(shell find mercurial hgext doc -name '*.py')
 DOCFILES=mercurial/help/*.txt
 
+# Set this to e.g. "mingw32" to use a non-default compiler.
+COMPILER=
+
 help:
 	@echo 'Commonly used make targets:'
 	@echo '  all          - build program and documentation'
@@ -33,11 +36,15 @@
 all: build doc
 
 local:
-	$(PYTHON) setup.py $(PURE) build_py -c -d . build_ext -i build_hgexe -i build_mo
-	$(PYTHON) hg version
+	$(PYTHON) setup.py $(PURE) \
+	  build_py -c -d . \
+	  build_ext $(COMPILER:%=-c %) -i \
+	  build_hgexe $(COMPILER:%=-c %) -i \
+	  build_mo
+	env HGRCPATH= $(PYTHON) hg version
 
 build:
-	$(PYTHON) setup.py $(PURE) build
+	$(PYTHON) setup.py $(PURE) build $(COMPILER:%=-c %)
 
 doc:
 	$(MAKE) -C doc
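
The new COMPILER knob relies on GNU make's pattern substitution: $(COMPILER:%=-c %) expands to nothing when COMPILER is empty and to "-c <name>" (e.g. "-c mingw32") otherwise, so the default build command line is unchanged. A minimal Python sketch of that expansion, for illustration only and not part of the build:

    def compiler_flags(compiler):
        # mirrors $(COMPILER:%=-c %): one "-c <word>" per word, nothing if empty
        return " ".join("-c %s" % w for w in compiler.split())

    assert compiler_flags("") == ""
    assert compiler_flags("mingw32") == "-c mingw32"
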
--- a/contrib/check-code.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/contrib/check-code.py	Fri Dec 28 14:13:06 2012 +0100
@@ -133,9 +133,9 @@
     (r'\S;\s*\n', "semicolon"),
     (r'[^_]_\("[^"]+"\s*%', "don't use % inside _()"),
     (r"[^_]_\('[^']+'\s*%", "don't use % inside _()"),
-    (r'\w,\w', "missing whitespace after ,"),
-    (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
-    (r'^\s+\w+=\w+[^,)\n]$', "missing whitespace in assignment"),
+    (r'(\w|\)),\w', "missing whitespace after ,"),
+    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
+    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
     (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
      r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
     (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
@@ -211,11 +211,11 @@
     (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
     (r'^\s*except\s*:', "warning: naked except clause", r'#.*re-raises'),
     (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
+    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
+     "missing _() in ui message (use () to hide false-positives)"),
   ],
   # warnings
   [
-    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
-     "warning: unwrapped ui message"),
   ]
 ]
 
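
The tightened check-code patterns above now also flag a closing parenthesis glued to a comma or operator, and the unwrapped-ui-message check moves from the warnings list to the errors list. A standalone sketch of what the two new whitespace patterns (quoted verbatim from the hunk) match:

    import re

    comma_pat = r'(\w|\)),\w'           # "missing whitespace after ,"
    expr_pat  = r'(\w|\))[+/*\-<>]\w'   # "missing whitespace in expression"

    assert re.search(comma_pat, "foo(a),b")       # ')' before ',' is now caught
    assert re.search(expr_pat,  "f(x)+1")         # ')' before an operator too
    assert not re.search(comma_pat, "foo(a), b")  # properly spaced code passes
    assert not re.search(expr_pat,  "f(x) + 1")
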
--- a/contrib/hgk	Fri Dec 28 14:10:35 2012 +0100
+++ b/contrib/hgk	Fri Dec 28 14:13:06 2012 +0100
@@ -15,8 +15,43 @@
 # The whole snipped is activated only under windows, mouse wheel
 # bindings working already under MacOSX and Linux.
 
+if {[catch {package require Ttk}]} {
+    # use a shim
+    namespace eval ttk {
+        proc style args {}
+
+        proc entry args {
+            eval [linsert $args 0 ::entry] -relief flat
+        }
+    }
+
+    interp alias {} ttk::button {} button
+    interp alias {} ttk::frame {} frame
+    interp alias {} ttk::label {} label
+    interp alias {} ttk::scrollbar {} scrollbar
+    interp alias {} ttk::optionMenu {} tk_optionMenu
+} else {
+    proc ::ttk::optionMenu {w varName firstValue args} {
+        upvar #0 $varName var
+
+        if {![info exists var]} {
+            set var $firstValue
+        }
+        ttk::menubutton $w -textvariable $varName -menu $w.menu \
+                -direction flush
+        menu $w.menu -tearoff 0
+        $w.menu add radiobutton -label $firstValue -variable $varName
+        foreach i $args {
+            $w.menu add radiobutton -label $i -variable $varName
+        }
+        return $w.menu
+    }
+}
+
 if {[tk windowingsystem] eq "win32"} {
 
+ttk::style theme use xpnative
+
 set mw_classes [list Text Listbox Table TreeCtrl]
    foreach class $mw_classes { bind $class <MouseWheel> {} }
 
@@ -72,6 +107,12 @@
 bind all <MouseWheel> [list ::tk::MouseWheel %W %X %Y %D 0]
 
 # end of win32 section
+} else {
+
+if {[ttk::style theme use] eq "default"} {
+    ttk::style theme use clam
+}
+
 }
 
 
@@ -480,7 +521,7 @@
     wm transient $w .
     message $w.m -text $msg -justify center -aspect 400
     pack $w.m -side top -fill x -padx 20 -pady 20
-    button $w.ok -text OK -command "destroy $w"
+    ttk::button $w.ok -text OK -command "destroy $w"
     pack $w.ok -side bottom -fill x
     bind $w <Visibility> "grab $w; focus $w"
     tkwait window $w
@@ -526,11 +567,11 @@
 	set geometry(ctexth) [expr {($texth - 8) /
 				    [font metrics $textfont -linespace]}]
     }
-    frame .ctop.top
-    frame .ctop.top.bar
+    ttk::frame .ctop.top
+    ttk::frame .ctop.top.bar
     pack .ctop.top.bar -side bottom -fill x
     set cscroll .ctop.top.csb
-    scrollbar $cscroll -command {allcanvs yview} -highlightthickness 0
+    ttk::scrollbar $cscroll -command {allcanvs yview}
     pack $cscroll -side right -fill y
     panedwindow .ctop.top.clist -orient horizontal -sashpad 0 -handlesize 4
     pack .ctop.top.clist -side top -fill both -expand 1
@@ -538,15 +579,15 @@
     set canv .ctop.top.clist.canv
     canvas $canv -height $geometry(canvh) -width $geometry(canv1) \
 	-bg $bgcolor -bd 0 \
-	-yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground grey
+	-yscrollincr $linespc -yscrollcommand "$cscroll set" -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv
     set canv2 .ctop.top.clist.canv2
     canvas $canv2 -height $geometry(canvh) -width $geometry(canv2) \
-	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey
+	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv2
     set canv3 .ctop.top.clist.canv3
     canvas $canv3 -height $geometry(canvh) -width $geometry(canv3) \
-	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground grey
+	-bg $bgcolor -bd 0 -yscrollincr $linespc -selectbackground "#c0c0c0"
     .ctop.top.clist add $canv3
     bind .ctop.top.clist <Configure> {resizeclistpanes %W %w}
 
@@ -557,7 +598,7 @@
 	-command gotocommit -width 8
     $sha1but conf -disabledforeground [$sha1but cget -foreground]
     pack .ctop.top.bar.sha1label -side left
-    entry $sha1entry -width 40 -font $textfont -textvariable sha1string
+    ttk::entry $sha1entry -width 40 -font $textfont -textvariable sha1string
     trace add variable sha1string write sha1change
     pack $sha1entry -side left -pady 2
 
@@ -577,25 +618,25 @@
 	0x00, 0x38, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x00, 0x38, 0x00, 0x1c,
 	0x00, 0x0e, 0x00, 0x07, 0x80, 0x03, 0xc0, 0x01};
     }
-    button .ctop.top.bar.leftbut -image bm-left -command goback \
+    ttk::button .ctop.top.bar.leftbut -image bm-left -command goback \
 	-state disabled -width 26
     pack .ctop.top.bar.leftbut -side left -fill y
-    button .ctop.top.bar.rightbut -image bm-right -command goforw \
+    ttk::button .ctop.top.bar.rightbut -image bm-right -command goforw \
 	-state disabled -width 26
     pack .ctop.top.bar.rightbut -side left -fill y
 
-    button .ctop.top.bar.findbut -text "Find" -command dofind
+    ttk::button .ctop.top.bar.findbut -text "Find" -command dofind
     pack .ctop.top.bar.findbut -side left
     set findstring {}
     set fstring .ctop.top.bar.findstring
     lappend entries $fstring
-    entry $fstring -width 30 -font $textfont -textvariable findstring
+    ttk::entry $fstring -width 30 -font $textfont -textvariable findstring
     pack $fstring -side left -expand 1 -fill x
     set findtype Exact
-    set findtypemenu [tk_optionMenu .ctop.top.bar.findtype \
+    set findtypemenu [ttk::optionMenu .ctop.top.bar.findtype \
 			  findtype Exact IgnCase Regexp]
     set findloc "All fields"
-    tk_optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \
+    ttk::optionMenu .ctop.top.bar.findloc findloc "All fields" Headline \
 	Comments Author Committer Files Pickaxe
     pack .ctop.top.bar.findloc -side right
     pack .ctop.top.bar.findtype -side right
@@ -604,14 +645,14 @@
 
     panedwindow .ctop.cdet -orient horizontal
     .ctop add .ctop.cdet
-    frame .ctop.cdet.left
+    ttk::frame .ctop.cdet.left
     set ctext .ctop.cdet.left.ctext
     text $ctext -fg $fgcolor -bg $bgcolor -state disabled -font $textfont \
 	-width $geometry(ctextw) -height $geometry(ctexth) \
 	-yscrollcommand ".ctop.cdet.left.sb set" \
 	-xscrollcommand ".ctop.cdet.left.hb set" -wrap none
-    scrollbar .ctop.cdet.left.sb -command "$ctext yview"
-    scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview"
+    ttk::scrollbar .ctop.cdet.left.sb -command "$ctext yview"
+    ttk::scrollbar .ctop.cdet.left.hb -orient horizontal -command "$ctext xview"
     pack .ctop.cdet.left.sb -side right -fill y
     pack .ctop.cdet.left.hb -side bottom -fill x
     pack $ctext -side left -fill both -expand 1
@@ -643,12 +684,12 @@
 	$ctext tag conf found -back yellow
     }
 
-    frame .ctop.cdet.right
+    ttk::frame .ctop.cdet.right
     set cflist .ctop.cdet.right.cfiles
     listbox $cflist -fg $fgcolor -bg $bgcolor \
         -selectmode extended -width $geometry(cflistw) \
 	-yscrollcommand ".ctop.cdet.right.sb set"
-    scrollbar .ctop.cdet.right.sb -command "$cflist yview"
+    ttk::scrollbar .ctop.cdet.right.sb -command "$cflist yview"
     pack .ctop.cdet.right.sb -side right -fill y
     pack $cflist -side left -fill both -expand 1
     .ctop.cdet add .ctop.cdet.right
@@ -901,7 +942,7 @@
 Use and redistribute under the terms of the GNU General Public License} \
 	    -justify center -aspect 400
     pack $w.m -side top -fill x -padx 20 -pady 20
-    button $w.ok -text Close -command "destroy $w"
+    ttk::button $w.ok -text Close -command "destroy $w"
     pack $w.ok -side bottom
 }
 
@@ -1219,7 +1260,7 @@
         } else {
             # draw a head or other ref
             if {[incr nheads -1] >= 0} {
-                set col green
+                set col "#00ff00"
             } else {
                 set col "#ddddff"
             }
@@ -2417,8 +2458,7 @@
     set currentid $id
     $sha1entry delete 0 end
     $sha1entry insert 0 $id
-    $sha1entry selection from 0
-    $sha1entry selection to end
+    $sha1entry selection range 0 end
 
     $ctext conf -state normal
     $ctext delete 0.0 end
@@ -3675,36 +3715,36 @@
     set patchtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Generate patch"
+    ttk::label $top.title -text "Generate patch"
     grid $top.title - -pady 10
-    label $top.from -text "From:"
-    entry $top.fromsha1 -width 40 -relief flat
+    ttk::label $top.from -text "From:"
+    ttk::entry $top.fromsha1 -width 40
     $top.fromsha1 insert 0 $oldid
     $top.fromsha1 conf -state readonly
     grid $top.from $top.fromsha1 -sticky w
-    entry $top.fromhead -width 60 -relief flat
+    ttk::entry $top.fromhead -width 60
     $top.fromhead insert 0 $oldhead
     $top.fromhead conf -state readonly
     grid x $top.fromhead -sticky w
-    label $top.to -text "To:"
-    entry $top.tosha1 -width 40 -relief flat
+    ttk::label $top.to -text "To:"
+    ttk::entry $top.tosha1 -width 40
     $top.tosha1 insert 0 $newid
     $top.tosha1 conf -state readonly
     grid $top.to $top.tosha1 -sticky w
-    entry $top.tohead -width 60 -relief flat
+    ttk::entry $top.tohead -width 60
     $top.tohead insert 0 $newhead
     $top.tohead conf -state readonly
     grid x $top.tohead -sticky w
-    button $top.rev -text "Reverse" -command mkpatchrev -padx 5
+    ttk::button $top.rev -text "Reverse" -command mkpatchrev
     grid $top.rev x -pady 10
-    label $top.flab -text "Output file:"
-    entry $top.fname -width 60
+    ttk::label $top.flab -text "Output file:"
+    ttk::entry $top.fname -width 60
     $top.fname insert 0 [file normalize "patch$patchnum.patch"]
     incr patchnum
     grid $top.flab $top.fname -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Generate" -command mkpatchgo
-    button $top.buts.can -text "Cancel" -command mkpatchcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Generate" -command mkpatchgo
+    ttk::button $top.buts.can -text "Cancel" -command mkpatchcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a
@@ -3755,23 +3795,23 @@
     set mktagtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Create tag"
+    ttk::label $top.title -text "Create tag"
     grid $top.title - -pady 10
-    label $top.id -text "ID:"
-    entry $top.sha1 -width 40 -relief flat
+    ttk::label $top.id -text "ID:"
+    ttk::entry $top.sha1 -width 40
     $top.sha1 insert 0 $rowmenuid
     $top.sha1 conf -state readonly
     grid $top.id $top.sha1 -sticky w
-    entry $top.head -width 60 -relief flat
+    ttk::entry $top.head -width 60
     $top.head insert 0 [lindex $commitinfo($rowmenuid) 0]
     $top.head conf -state readonly
     grid x $top.head -sticky w
-    label $top.tlab -text "Tag name:"
-    entry $top.tag -width 60
+    ttk::label $top.tlab -text "Tag name:"
+    ttk::entry $top.tag -width 60
     grid $top.tlab $top.tag -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Create" -command mktaggo
-    button $top.buts.can -text "Cancel" -command mktagcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Create" -command mktaggo
+    ttk::button $top.buts.can -text "Cancel" -command mktagcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a
@@ -3835,27 +3875,27 @@
     set wrcomtop $top
     catch {destroy $top}
     toplevel $top
-    label $top.title -text "Write commit to file"
+    ttk::label $top.title -text "Write commit to file"
     grid $top.title - -pady 10
-    label $top.id -text "ID:"
-    entry $top.sha1 -width 40 -relief flat
+    ttk::label $top.id -text "ID:"
+    ttk::entry $top.sha1 -width 40
     $top.sha1 insert 0 $rowmenuid
     $top.sha1 conf -state readonly
     grid $top.id $top.sha1 -sticky w
-    entry $top.head -width 60 -relief flat
+    ttk::entry $top.head -width 60
     $top.head insert 0 [lindex $commitinfo($rowmenuid) 0]
     $top.head conf -state readonly
     grid x $top.head -sticky w
-    label $top.clab -text "Command:"
-    entry $top.cmd -width 60 -textvariable wrcomcmd
+    ttk::label $top.clab -text "Command:"
+    ttk::entry $top.cmd -width 60 -textvariable wrcomcmd
     grid $top.clab $top.cmd -sticky w -pady 10
-    label $top.flab -text "Output file:"
-    entry $top.fname -width 60
+    ttk::label $top.flab -text "Output file:"
+    ttk::entry $top.fname -width 60
     $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6]"]
     grid $top.flab $top.fname -sticky w
-    frame $top.buts
-    button $top.buts.gen -text "Write" -command wrcomgo
-    button $top.buts.can -text "Cancel" -command wrcomcan
+    ttk::frame $top.buts
+    ttk::button $top.buts.gen -text "Write" -command wrcomgo
+    ttk::button $top.buts.can -text "Cancel" -command wrcomcan
     grid $top.buts.gen $top.buts.can
     grid columnconfigure $top.buts 0 -weight 1 -uniform a
     grid columnconfigure $top.buts 1 -weight 1 -uniform a
--- a/contrib/perf.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/contrib/perf.py	Fri Dec 28 14:13:06 2012 +0100
@@ -40,11 +40,11 @@
         except Exception:
             timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
 
-def perfstatus(ui, repo, *pats):
+def perfstatus(ui, repo, **opts):
     #m = match.always(repo.root, repo.getcwd())
     #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
-    timer(lambda: sum(map(len, repo.status())))
+    timer(lambda: sum(map(len, repo.status(**opts))))
 
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
@@ -78,6 +78,15 @@
             pass
     timer(d)
 
+def perfancestorset(ui, repo, revset):
+    revs = repo.revs(revset)
+    heads = repo.changelog.headrevs()
+    def d():
+        s = repo.changelog.ancestors(heads)
+        for rev in revs:
+            rev in s
+    timer(d)
+
 def perfdirstate(ui, repo):
     "a" in repo.dirstate
     def d():
@@ -228,6 +237,11 @@
 
     timer(d)
 
+def perfrevset(ui, repo, expr):
+    def d():
+        repo.revs(expr)
+    timer(d)
+
 cmdtable = {
     'perfcca': (perfcca, []),
     'perffncacheload': (perffncacheload, []),
@@ -238,7 +252,9 @@
     'perfnodelookup': (perfnodelookup, []),
     'perfparents': (perfparents, []),
     'perfstartup': (perfstartup, []),
-    'perfstatus': (perfstatus, []),
+    'perfstatus': (perfstatus,
+                   [('u', 'unknown', False,
+                     'ask status to look for unknown files')]),
     'perfwalk': (perfwalk, []),
     'perfmanifest': (perfmanifest, []),
     'perfchangeset': (perfchangeset, []),
@@ -246,6 +262,7 @@
     'perfheads': (perfheads, []),
     'perftags': (perftags, []),
     'perfancestors': (perfancestors, []),
+    'perfancestorset': (perfancestorset, [], "REVSET"),
     'perfdirstate': (perfdirstate, []),
     'perfdirstatedirs': (perfdirstate, []),
     'perfdirstatewrite': (perfdirstatewrite, []),
@@ -256,4 +273,5 @@
     'perfrevlog': (perfrevlog,
                    [('d', 'dist', 100, 'distance between the revisions')],
                    "[INDEXFILE]"),
+    'perfrevset': (perfrevset, [], "REVSET")
 }
--- a/contrib/synthrepo.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/contrib/synthrepo.py	Fri Dec 28 14:13:06 2012 +0100
@@ -231,6 +231,8 @@
     fp.close()
 
     def cdf(l):
+        if not l:
+            return [], []
         vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
         t = float(sum(probs, 0))
         s, cdfs = 0, []
--- a/contrib/vim/hgtest.vim	Fri Dec 28 14:10:35 2012 +0100
+++ b/contrib/vim/hgtest.vim	Fri Dec 28 14:13:06 2012 +0100
@@ -2,7 +2,8 @@
 " Language: Mercurial unified tests
 " Author: Steve Losh (steve@stevelosh.com)
 "
-" Add the following line to your ~/.vimrc to enable:
+" Place this file in ~/.vim/syntax/ and add the following line to your
+" ~/.vimrc to enable:
 " au BufNewFile,BufRead *.t set filetype=hgtest
 "
 " If you want folding you'll need the following line as well:
--- a/doc/hgmanpage.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/doc/hgmanpage.py	Fri Dec 28 14:13:06 2012 +0100
@@ -146,7 +146,7 @@
                 text.extend(cell)
                 if not text[-1].endswith('\n'):
                     text[-1] += '\n'
-                if i < len(row)-1:
+                if i < len(row) - 1:
                     text.append('T}'+self._tab_char+'T{\n')
                 else:
                     text.append('T}\n')
@@ -258,7 +258,7 @@
             # ensure we get a ".TH" as viewers require it.
             self.head.append(self.header())
         # filter body
-        for i in xrange(len(self.body)-1, 0, -1):
+        for i in xrange(len(self.body) - 1, 0, -1):
             # remove superfluous vertical gaps.
             if self.body[i] == '.sp\n':
                 if self.body[i - 1][:4] in ('.BI ','.IP '):
@@ -880,7 +880,7 @@
         self.context[-3] = '.BI' # bold/italic alternate
         if node['delimiter'] != ' ':
             self.body.append('\\fB%s ' % node['delimiter'])
-        elif self.body[len(self.body)-1].endswith('='):
+        elif self.body[len(self.body) - 1].endswith('='):
             # a blank only means no blank in output, just changing font
             self.body.append(' ')
         else:
--- a/hgext/convert/__init__.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/__init__.py	Fri Dec 28 14:13:06 2012 +0100
@@ -191,6 +191,10 @@
         branch indicated in the regex as the second parent of the
         changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
 
+    :convert.localtimezone: use local time (as determined by the TZ
+        environment variable) for changeset date/times. The default
+        is False (use UTC).
+
     :hook.cvslog: Specify a Python function to be called at the end of
         gathering the CVS log. The function is passed a list with the
         log entries, and can modify the entries in-place, or add or
@@ -231,6 +235,10 @@
     :convert.svn.trunk: specify the name of the trunk branch. The
         default is ``trunk``.
 
+    :convert.localtimezone: use local time (as determined by the TZ
+        environment variable) for changeset date/times. The default
+        is False (use UTC).
+
     Source history can be retrieved starting at a specific revision,
     instead of being integrally converted. Only single branch
     conversions are supported.
--- a/hgext/convert/common.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/common.py	Fri Dec 28 14:13:06 2012 +0100
@@ -5,7 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import base64, errno, subprocess, os
+import base64, errno, subprocess, os, datetime
 import cPickle as pickle
 from mercurial import util
 from mercurial.i18n import _
@@ -446,3 +446,10 @@
         if e.errno != errno.ENOENT:
             raise
     return m
+
+def makedatetimestamp(t):
+    """Like util.makedate() but for time t instead of current time"""
+    delta = (datetime.datetime.utcfromtimestamp(t) -
+             datetime.datetime.fromtimestamp(t))
+    tz = delta.days * 86400 + delta.seconds
+    return t, tz
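
makedatetimestamp() backs the new convert.localtimezone option documented earlier in this changeset: it keeps the original timestamp but pairs it with the host's UTC offset at that moment. A self-contained sketch of the computation; the epoch value and the UTC-5 host are illustrative assumptions:

    import datetime

    def makedatetimestamp(t):
        # same computation as the new helper above
        delta = (datetime.datetime.utcfromtimestamp(t) -
                 datetime.datetime.fromtimestamp(t))
        tz = delta.days * 86400 + delta.seconds
        return t, tz

    # On a host five hours west of UTC this prints (1356700386, 18000):
    # the timestamp itself is unchanged, only the stored offset differs.
    print(makedatetimestamp(1356700386))
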
--- a/hgext/convert/cvs.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/cvs.py	Fri Dec 28 14:13:06 2012 +0100
@@ -11,6 +11,7 @@
 from mercurial.i18n import _
 
 from common import NoRepo, commit, converter_source, checktool
+from common import makedatetimestamp
 import cvsps
 
 class convert_cvs(converter_source):
@@ -70,6 +71,8 @@
                 cs.author = self.recode(cs.author)
                 self.lastbranch[cs.branch] = id
                 cs.comment = self.recode(cs.comment)
+                if self.ui.configbool('convert', 'localtimezone'):
+                    cs.date = makedatetimestamp(cs.date[0])
                 date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
                 self.tags.update(dict.fromkeys(cs.tags, id))
 
--- a/hgext/convert/cvsps.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/cvsps.py	Fri Dec 28 14:13:06 2012 +0100
@@ -801,22 +801,22 @@
             # Note: trailing spaces on several lines here are needed to have
             #       bug-for-bug compatibility with cvsps.
             ui.write('---------------------\n')
-            ui.write('PatchSet %d \n' % cs.id)
-            ui.write('Date: %s\n' % util.datestr(cs.date,
-                                                 '%Y/%m/%d %H:%M:%S %1%2'))
-            ui.write('Author: %s\n' % cs.author)
-            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
-            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
-                                  ','.join(cs.tags) or '(none)'))
+            ui.write(('PatchSet %d \n' % cs.id))
+            ui.write(('Date: %s\n' % util.datestr(cs.date,
+                                                 '%Y/%m/%d %H:%M:%S %1%2')))
+            ui.write(('Author: %s\n' % cs.author))
+            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
+            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
+                                  ','.join(cs.tags) or '(none)')))
             branchpoints = getattr(cs, 'branchpoints', None)
             if branchpoints:
-                ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
+                ui.write(('Branchpoints: %s \n' % ', '.join(branchpoints)))
             if opts["parents"] and cs.parents:
                 if len(cs.parents) > 1:
-                    ui.write('Parents: %s\n' %
-                             (','.join([str(p.id) for p in cs.parents])))
+                    ui.write(('Parents: %s\n' %
+                             (','.join([str(p.id) for p in cs.parents]))))
                 else:
-                    ui.write('Parent: %d\n' % cs.parents[0].id)
+                    ui.write(('Parent: %d\n' % cs.parents[0].id))
 
             if opts["ancestors"]:
                 b = cs.branch
@@ -825,11 +825,11 @@
                     b, c = ancestors[b]
                     r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                 if r:
-                    ui.write('Ancestors: %s\n' % (','.join(r)))
+                    ui.write(('Ancestors: %s\n' % (','.join(r))))
 
-            ui.write('Log:\n')
+            ui.write(('Log:\n'))
             ui.write('%s\n\n' % cs.comment)
-            ui.write('Members: \n')
+            ui.write(('Members: \n'))
             for f in cs.entries:
                 fn = f.file
                 if fn.startswith(opts["prefix"]):
--- a/hgext/convert/git.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/git.py	Fri Dec 28 14:13:06 2012 +0100
@@ -6,12 +6,24 @@
 # GNU General Public License version 2 or any later version.
 
 import os
-from mercurial import util
+from mercurial import util, config
 from mercurial.node import hex, nullid
 from mercurial.i18n import _
 
 from common import NoRepo, commit, converter_source, checktool
 
+class submodule(object):
+    def __init__(self, path, node, url):
+        self.path = path
+        self.node = node
+        self.url = url
+
+    def hgsub(self):
+        return "%s = [git]%s" % (self.path, self.url)
+
+    def hgsubstate(self):
+        return "%s %s" % (self.node, self.path)
+
 class convert_git(converter_source):
     # Windows does not support GIT_DIR= construct while other systems
     # cannot remove environment variable. Just assume none have
@@ -55,6 +67,7 @@
         checktool('git', 'git')
 
         self.path = path
+        self.submodules = []
 
     def getheads(self):
         if not self.rev:
@@ -76,16 +89,57 @@
         return data
 
     def getfile(self, name, rev):
-        data = self.catfile(rev, "blob")
-        mode = self.modecache[(name, rev)]
+        if name == '.hgsub':
+            data = '\n'.join([m.hgsub() for m in self.submoditer()])
+            mode = ''
+        elif name == '.hgsubstate':
+            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
+            mode = ''
+        else:
+            data = self.catfile(rev, "blob")
+            mode = self.modecache[(name, rev)]
         return data, mode
 
+    def submoditer(self):
+        null = hex(nullid)
+        for m in sorted(self.submodules, key=lambda p: p.path):
+            if m.node != null:
+                yield m
+
+    def parsegitmodules(self, content):
+        """Parse the formatted .gitmodules file, example file format:
+        [submodule "sub"]\n
+        \tpath = sub\n
+        \turl = git://giturl\n
+        """
+        self.submodules = []
+        c = config.config()
+        # Each item in .gitmodules starts with \t that cant be parsed
+        c.parse('.gitmodules', content.replace('\t',''))
+        for sec in c.sections():
+            s = c[sec]
+            if 'url' in s and 'path' in s:
+                self.submodules.append(submodule(s['path'], '', s['url']))
+
+    def retrievegitmodules(self, version):
+        modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules'))
+        if ret:
+            raise util.Abort(_('cannot read submodules config file in %s') %
+                             version)
+        self.parsegitmodules(modules)
+        for m in self.submodules:
+            node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
+            if ret:
+                continue
+            m.node = node.strip()
+
     def getchanges(self, version):
         self.modecache = {}
         fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
         changes = []
         seen = set()
         entry = None
+        subexists = False
         for l in fh.read().split('\x00'):
             if not entry:
                 if not l.startswith(':'):
@@ -97,15 +151,24 @@
                 seen.add(f)
                 entry = entry.split()
                 h = entry[3]
-                if entry[1] == '160000':
-                    raise util.Abort('git submodules are not supported!')
                 p = (entry[1] == "100755")
                 s = (entry[1] == "120000")
-                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
-                changes.append((f, h))
+
+                if f == '.gitmodules':
+                    subexists = True
+                    changes.append(('.hgsub', ''))
+                elif entry[1] == '160000' or entry[0] == ':160000':
+                    subexists = True
+                else:
+                    self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+                    changes.append((f, h))
             entry = None
         if fh.close():
             raise util.Abort(_('cannot read changes in %s') % version)
+
+        if subexists:
+            self.retrievegitmodules(version)
+            changes.append(('.hgsubstate', ''))
         return (changes, {})
 
     def getcommit(self, version):
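
The submodule support above leans on mercurial.config to parse .gitmodules, after stripping the leading tabs that the ini parser would otherwise choke on. A small standalone sketch of that parsing step (the submodule path and URL are made-up; assumes the mercurial package is importable):

    from mercurial import config

    content = (
        '[submodule "sub"]\n'
        '\tpath = sub\n'
        '\turl = git://example.com/sub.git\n'
    )
    c = config.config()
    c.parse('.gitmodules', content.replace('\t', ''))  # drop the leading tabs first
    for sec in c.sections():
        s = c[sec]
        if 'url' in s and 'path' in s:
            print('%s = [git]%s' % (s['path'], s['url']))  # .hgsub-style line
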
--- a/hgext/convert/hg.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/hg.py	Fri Dec 28 14:13:06 2012 +0100
@@ -219,9 +219,10 @@
             return
 
         self.ui.status(_("updating bookmarks\n"))
+        destmarks = self.repo._bookmarks
         for bookmark in updatedbookmark:
-            self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark])
-            bookmarks.write(self.repo)
+            destmarks[bookmark] = bin(updatedbookmark[bookmark])
+        destmarks.write()
 
     def hascommit(self, rev):
         if rev not in self.repo and self.clonebranches:
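
This hunk (and a matching one in hgext/histedit.py below) switches to the bmstore API: stage all bookmark moves on repo._bookmarks, then call write() once instead of rewriting the file per bookmark. A toy sketch of the pattern with a stand-in store, since a real repo object is not available here; the bookmark names and nodes are invented:

    class fakebmstore(dict):
        """stand-in for repo._bookmarks, for illustration only"""
        def write(self):
            print('persisting %d bookmarks in one pass' % len(self))

    marks = fakebmstore()
    for name, newnode in [('@', 'abc123'), ('stable', 'def456')]:
        marks[name] = newnode      # stage every move in memory
    marks.write()                  # single write, not one per bookmark
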
--- a/hgext/convert/subversion.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/convert/subversion.py	Fri Dec 28 14:13:06 2012 +0100
@@ -18,6 +18,7 @@
 
 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
 from common import commandline, converter_source, converter_sink, mapfile
+from common import makedatetimestamp
 
 try:
     from svn.core import SubversionException, Pool
@@ -802,6 +803,8 @@
             # ISO-8601 conformant
             # '2007-01-04T17:35:00.902377Z'
             date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
+            if self.ui.configbool('convert', 'localtimezone'):
+                date = makedatetimestamp(date[0])
 
             log = message and self.recode(message) or ''
             author = author and self.recode(author) or ''
--- a/hgext/eol.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/eol.py	Fri Dec 28 14:13:06 2012 +0100
@@ -307,7 +307,7 @@
                 eolmtime = 0
 
             if eolmtime > cachemtime:
-                ui.debug("eol: detected change in .hgeol\n")
+                self.ui.debug("eol: detected change in .hgeol\n")
                 wlock = None
                 try:
                     wlock = self.wlock()
--- a/hgext/hgk.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/hgk.py	Fri Dec 28 14:13:06 2012 +0100
@@ -98,9 +98,9 @@
     if ctx is None:
         ctx = repo[n]
     # use ctx.node() instead ??
-    ui.write("tree %s\n" % short(ctx.changeset()[0]))
+    ui.write(("tree %s\n" % short(ctx.changeset()[0])))
     for p in ctx.parents():
-        ui.write("parent %s\n" % p)
+        ui.write(("parent %s\n" % p))
 
     date = ctx.date()
     description = ctx.description().replace("\0", "")
@@ -108,12 +108,13 @@
     if lines and lines[-1].startswith('committer:'):
         committer = lines[-1].split(': ')[1].rstrip()
     else:
-        committer = ctx.user()
+        committer = ""
 
-    ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
-    ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
-    ui.write("revision %d\n" % ctx.rev())
-    ui.write("branch %s\n\n" % ctx.branch())
+    ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
+    if committer != '':
+        ui.write(("committer %s %s %s\n" % (committer, int(date[0]), date[1])))
+    ui.write(("revision %d\n" % ctx.rev()))
+    ui.write(("branch %s\n\n" % ctx.branch()))
 
     if prefix != "":
         ui.write("%s%s\n" % (prefix,
@@ -302,7 +303,7 @@
 def config(ui, repo, **opts):
     """print extension options"""
     def writeopt(name, value):
-        ui.write('k=%s\nv=%s\n' % (name, value))
+        ui.write(('k=%s\nv=%s\n' % (name, value)))
 
     writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
 
--- a/hgext/highlight/highlight.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/highlight/highlight.py	Fri Dec 28 14:13:06 2012 +0100
@@ -50,7 +50,7 @@
     colorized = highlight(text, lexer, formatter)
     # strip wrapping div
     colorized = colorized[:colorized.find('\n</pre>')]
-    colorized = colorized[colorized.find('<pre>')+5:]
+    colorized = colorized[colorized.find('<pre>') + 5:]
     coloriter = (s.encode(encoding.encoding, 'replace')
                  for s in colorized.splitlines())
 
--- a/hgext/histedit.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/histedit.py	Fri Dec 28 14:13:06 2012 +0100
@@ -144,7 +144,6 @@
     import pickle
 import os
 
-from mercurial import bookmarks
 from mercurial import cmdutil
 from mercurial import discovery
 from mercurial import error
@@ -740,12 +739,13 @@
             # nothing to move
         moves.append((bk, new[-1]))
     if moves:
+        marks = repo._bookmarks
         for mark, new in moves:
-            old = repo._bookmarks[mark]
+            old = marks[mark]
             ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
                     % (mark, node.short(old), node.short(new)))
-            repo._bookmarks[mark] = new
-        bookmarks.write(repo)
+            marks[mark] = new
+        marks.write()
 
 def cleanupnode(ui, repo, name, nodes):
     """strip a group of nodes from the repository
--- a/hgext/inotify/linux/watcher.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/inotify/linux/watcher.py	Fri Dec 28 14:13:06 2012 +0100
@@ -72,7 +72,7 @@
 
     def __repr__(self):
         r = repr(self.raw)
-        return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
+        return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
 
 
 _event_props = {
--- a/hgext/inotify/linuxserver.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/inotify/linuxserver.py	Fri Dec 28 14:13:06 2012 +0100
@@ -405,14 +405,7 @@
 
     def shutdown(self):
         self.sock.close()
-        try:
-            os.unlink(self.sockpath)
-            if self.realsockpath:
-                os.unlink(self.realsockpath)
-                os.rmdir(os.path.dirname(self.realsockpath))
-        except OSError, err:
-            if err.errno != errno.ENOENT:
-                raise
+        self.sock.cleanup()
 
     def answer_stat_query(self, cs):
         if self.repowatcher.timeout:
--- a/hgext/inotify/server.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/inotify/server.py	Fri Dec 28 14:13:06 2012 +0100
@@ -6,7 +6,7 @@
 # GNU General Public License version 2 or any later version.
 
 from mercurial.i18n import _
-from mercurial import cmdutil, osutil, util
+from mercurial import cmdutil, posix, osutil, util
 import common
 
 import errno
@@ -15,7 +15,6 @@
 import stat
 import struct
 import sys
-import tempfile
 
 class AlreadyStartedException(Exception):
     pass
@@ -330,42 +329,15 @@
     def __init__(self, ui, root, repowatcher, timeout):
         self.ui = ui
         self.repowatcher = repowatcher
-        self.sock = socket.socket(socket.AF_UNIX)
-        self.sockpath = join(root, '.hg/inotify.sock')
-
-        self.realsockpath = self.sockpath
-        if os.path.islink(self.sockpath):
-            if os.path.exists(self.sockpath):
-                self.realsockpath = os.readlink(self.sockpath)
-            else:
-                raise util.Abort('inotify-server: cannot start: '
-                                '.hg/inotify.sock is a broken symlink')
         try:
-            self.sock.bind(self.realsockpath)
-        except socket.error, err:
+            self.sock = posix.unixdomainserver(
+                lambda p: os.path.join(root, '.hg', p),
+                'inotify')
+        except (OSError, socket.error), err:
             if err.args[0] == errno.EADDRINUSE:
-                raise AlreadyStartedException(_('cannot start: socket is '
-                                                'already bound'))
-            if err.args[0] == "AF_UNIX path too long":
-                tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
-                self.realsockpath = os.path.join(tempdir, "inotify.sock")
-                try:
-                    self.sock.bind(self.realsockpath)
-                    os.symlink(self.realsockpath, self.sockpath)
-                except (OSError, socket.error), inst:
-                    try:
-                        os.unlink(self.realsockpath)
-                    except OSError:
-                        pass
-                    os.rmdir(tempdir)
-                    if inst.errno == errno.EEXIST:
-                        raise AlreadyStartedException(_('cannot start: tried '
-                            'linking .hg/inotify.sock to a temporary socket but'
-                            ' .hg/inotify.sock already exists'))
-                    raise
-            else:
-                raise
-        self.sock.listen(5)
+                raise AlreadyStartedException(_('cannot start: '
+                                                'socket is already bound'))
+            raise
         self.fileno = self.sock.fileno
 
     def answer_stat_query(self, cs):
--- a/hgext/largefiles/basestore.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/basestore.py	Fri Dec 28 14:13:06 2012 +0100
@@ -26,14 +26,8 @@
         self.detail = detail
 
     def longmessage(self):
-        if self.url:
-            return ('%s: %s\n'
-                    '(failed URL: %s)\n'
-                    % (self.filename, self.detail, self.url))
-        else:
-            return ('%s: %s\n'
-                    '(no default or default-push path set in hgrc)\n'
-                    % (self.filename, self.detail))
+        return (_("error getting %s from %s for %s: %s\n") %
+                 (self.hash, self.url, self.filename, self.detail))
 
     def __str__(self):
         return "%s: %s" % (self.url, self.detail)
--- a/hgext/largefiles/lfcommands.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/lfcommands.py	Fri Dec 28 14:13:06 2012 +0100
@@ -383,6 +383,13 @@
     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)
 
+def debugdirstate(ui, repo):
+    '''Show basic information for the largefiles dirstate'''
+    lfdirstate = lfutil.openlfdirstate(ui, repo)
+    for file_, ent in sorted(lfdirstate._map.iteritems()):
+        mode = '%3o' % (ent[1] & 0777 & ~util.umask)
+        ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_))
+
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
--- a/hgext/largefiles/lfutil.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/lfutil.py	Fri Dec 28 14:13:06 2012 +0100
@@ -18,43 +18,10 @@
 from mercurial.i18n import _
 
 shortname = '.hglf'
+shortnameslash = shortname + '/'
 longname = 'largefiles'
 
 
-# -- Portability wrappers ----------------------------------------------
-
-def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
-    return dirstate.walk(matcher, [], unknown, ignored)
-
-def repoadd(repo, list):
-    add = repo[None].add
-    return add(list)
-
-def reporemove(repo, list, unlink=False):
-    def remove(list, unlink):
-        wlock = repo.wlock()
-        try:
-            if unlink:
-                for f in list:
-                    try:
-                        util.unlinkpath(repo.wjoin(f))
-                    except OSError, inst:
-                        if inst.errno != errno.ENOENT:
-                            raise
-            repo[None].forget(list)
-        finally:
-            wlock.release()
-    return remove(list, unlink=unlink)
-
-def repoforget(repo, list):
-    forget = repo[None].forget
-    return forget(list)
-
-def findoutgoing(repo, remote, force):
-    from mercurial import discovery
-    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
-    return outgoing.missing
-
 # -- Private worker functions ------------------------------------------
 
 def getminsize(ui, assumelfiles, opt, default=10):
@@ -139,24 +106,26 @@
         return super(largefilesdirstate, self).forget(unixpath(f))
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
+    def _ignore(self):
+        return False
 
 def openlfdirstate(ui, repo, create=True):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     '''
-    admin = repo.join(longname)
-    opener = scmutil.opener(admin)
+    lfstoredir = repo.join(longname)
+    opener = scmutil.opener(lfstoredir)
     lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                      repo.dirstate._validate)
 
     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
-    if create and not os.path.exists(os.path.join(admin, 'dirstate')):
-        util.makedirs(admin)
+    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
+        util.makedirs(lfstoredir)
         matcher = getstandinmatcher(repo)
-        for standin in dirstatewalk(repo.dirstate, matcher):
+        for standin in repo.dirstate.walk(matcher, [], False, False):
             lfile = splitstandin(standin)
             hash = readstandin(repo, lfile)
             lfdirstate.normallookup(lfile)
@@ -250,7 +219,7 @@
 
 def getstandinmatcher(repo, pats=[], opts={}):
     '''Return a match object that applies pats to the standin directory'''
-    standindir = repo.pathto(shortname)
+    standindir = repo.wjoin(shortname)
     if pats:
         # patterns supplied: search standin directory relative to current dir
         cwd = repo.getcwd()
@@ -264,19 +233,11 @@
         pats = [standindir]
     else:
         # no patterns and no standin dir: return matcher that matches nothing
-        match = match_.match(repo.root, None, [], exact=True)
-        match.matchfn = lambda f: False
-        return match
-    return getmatcher(repo, pats, opts, showbad=False)
+        return match_.match(repo.root, None, [], exact=True)
 
-def getmatcher(repo, pats=[], opts={}, showbad=True):
-    '''Wrapper around scmutil.match() that adds showbad: if false,
-    neuter the match object's bad() method so it does not print any
-    warnings about missing files or directories.'''
+    # no warnings about missing files or directories
     match = scmutil.match(repo[None], pats, opts)
-
-    if not showbad:
-        match.bad = lambda f, msg: None
+    match.bad = lambda f, msg: None
     return match
 
 def composestandinmatcher(repo, rmatcher):
@@ -296,17 +257,17 @@
     file.'''
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
-    #    needs it repo-relative so it can be passed to repoadd().  So leave
-    #    it up to the caller to use repo.wjoin() to get an absolute path.
+    #    needs it repo-relative so it can be passed to repo[None].add().  So
+    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
-    return shortname + '/' + util.pconvert(filename)
+    return shortnameslash + util.pconvert(filename)
 
 def isstandin(filename):
     '''Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated).'''
-    return filename.startswith(shortname + '/')
+    return filename.startswith(shortnameslash)
 
 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
@@ -435,7 +396,7 @@
 
 def islfilesrepo(repo):
     if ('largefiles' in repo.requirements and
-            util.any(shortname + '/' in f[0] for f in repo.store.datafiles())):
+            util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
         return True
 
     return util.any(openlfdirstate(repo.ui, repo, False))
@@ -455,7 +416,7 @@
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
-    for standin in dirstatewalk(repo.dirstate, matcher):
+    for standin in repo.dirstate.walk(matcher, [], False, False):
         lfile = splitstandin(standin)
         standins.append((lfile, readstandin(repo, lfile)))
     return standins
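
The lfutil hunks above introduce shortnameslash and consistently join standin paths with '/', even on Windows, because that is what the dirstate uses. A self-contained mimic of the helpers involved (not the real module; util.pconvert is approximated with a plain replace):

    shortname = '.hglf'
    shortnameslash = shortname + '/'

    def standin(filename):
        # largefile name -> standin path, always slash-separated
        return shortnameslash + filename.replace('\\', '/')

    def isstandin(filename):
        return filename.startswith(shortnameslash)

    def splitstandin(filename):
        return filename[len(shortnameslash):] if isstandin(filename) else None

    assert standin('foo\\bar.bin') == '.hglf/foo/bar.bin'
    assert splitstandin('.hglf/foo/bar.bin') == 'foo/bar.bin'
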
--- a/hgext/largefiles/localstore.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/localstore.py	Fri Dec 28 14:13:06 2012 +0100
@@ -22,9 +22,8 @@
     the user cache.'''
 
     def __init__(self, ui, repo, remote):
-        url = os.path.join(remote.local().path, '.hg', lfutil.longname)
-        super(localstore, self).__init__(ui, repo, util.expandpath(url))
         self.remote = remote.local()
+        super(localstore, self).__init__(ui, repo, self.remote.url())
 
     def put(self, source, hash):
         util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
@@ -46,7 +45,7 @@
         elif lfutil.inusercache(self.ui, hash):
             path = lfutil.usercachepath(self.ui, hash)
         else:
-            raise basestore.StoreError(filename, hash, '',
+            raise basestore.StoreError(filename, hash, self.url,
                 _("can't get file locally"))
         fd = open(path, 'rb')
         try:
--- a/hgext/largefiles/overrides.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/overrides.py	Fri Dec 28 14:13:06 2012 +0100
@@ -12,7 +12,7 @@
 import copy
 
 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
-    node, archival, error, merge
+    node, archival, error, merge, discovery
 from mercurial.i18n import _
 from mercurial.node import hex
 from hgext import rebase
@@ -116,7 +116,7 @@
                     lfdirstate.add(f)
             lfdirstate.write()
             bad += [lfutil.splitstandin(f)
-                    for f in lfutil.repoadd(repo, standins)
+                    for f in repo[None].add(standins)
                     if f in m.files()]
     finally:
         wlock.release()
@@ -137,21 +137,23 @@
                                         if lfutil.standin(f) in manifest]
                                        for list in [s[0], s[1], s[3], s[6]]]
 
-    def warn(files, reason):
+    def warn(files, msg):
         for f in files:
-            ui.warn(_('not removing %s: %s (use forget to undo)\n')
-                    % (m.rel(f), reason))
+            ui.warn(msg % m.rel(f))
         return int(len(files) > 0)
 
     result = 0
 
     if after:
         remove, forget = deleted, []
-        result = warn(modified + added + clean, _('file still exists'))
+        result = warn(modified + added + clean,
+                      _('not removing %s: file still exists\n'))
     else:
         remove, forget = deleted + clean, []
-        result = warn(modified, _('file is modified'))
-        result = warn(added, _('file has been marked for add')) or result
+        result = warn(modified, _('not removing %s: file is modified (use -f'
+                                  ' to force removal)\n'))
+        result = warn(added, _('not removing %s: file has been marked for add'
+                               ' (use forget to undo)\n')) or result
 
     for f in sorted(remove + forget):
         if ui.verbose or not m.exact(f):
@@ -174,13 +176,13 @@
         lfdirstate.write()
         forget = [lfutil.standin(f) for f in forget]
         remove = [lfutil.standin(f) for f in remove]
-        lfutil.repoforget(repo, forget)
+        repo[None].forget(forget)
         # If this is being called by addremove, let the original addremove
         # function handle this.
         if not getattr(repo, "_isaddremove", False):
-            lfutil.reporemove(repo, remove, unlink=True)
-        else:
-            lfutil.reporemove(repo, remove, unlink=False)
+            for f in remove:
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+        repo[None].forget(remove)
     finally:
         wlock.release()
 
@@ -254,6 +256,13 @@
         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
     return result
 
+def overridedebugstate(orig, ui, repo, *pats, **opts):
+    large = opts.pop('large', False)
+    if large:
+        lfcommands.debugdirstate(ui, repo)
+    else:
+        orig(ui, repo, *pats, **opts)
+
 # Override needs to refresh standins so that update's normal merge
 # will go through properly. Then the other update hook (overriding repo.update)
 # will get the new files. Filemerge is also overridden so that the merge
@@ -746,7 +755,7 @@
         # .hg/largefiles, and the standin matcher won't match anything anyway.)
         if 'largefiles' in repo.requirements:
             if opts.get('noupdate'):
-                util.makedirs(repo.pathto(lfutil.shortname))
+                util.makedirs(repo.wjoin(lfutil.shortname))
                 util.makedirs(repo.join(lfutil.longname))
 
         # Caching is implicitly limited to 'rev' option, since the dest repo was
@@ -949,8 +958,10 @@
             else:
                 lfdirstate.remove(f)
         lfdirstate.write()
-        lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
-            unlink=True)
+        standins = [lfutil.standin(f) for f in forget]
+        for f in standins:
+            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+        repo[None].forget(standins)
     finally:
         wlock.release()
 
@@ -967,10 +978,10 @@
         remote = hg.peer(repo, opts, dest)
     except error.RepoError:
         return None
-    o = lfutil.findoutgoing(repo, remote, False)
-    if not o:
-        return o
-    o = repo.changelog.nodesbetween(o, revs)[0]
+    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
+    if not outgoing.missing:
+        return outgoing.missing
+    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
     if opts.get('newest_first'):
         o.reverse()
 
@@ -1065,6 +1076,9 @@
 # Calling purge with --all will cause the largefiles to be deleted.
 # Override repo.status to prevent this from happening.
 def overridepurge(orig, ui, repo, *dirs, **opts):
+    # XXX large file status is buggy when used on repo proxy.
+    # XXX this needs to be investigate.
+    repo = repo.unfiltered()
     oldstatus = repo.status
     def overridestatus(node1='.', node2=None, match=None, ignored=False,
                         clean=False, unknown=False, listsubrepos=False):
--- a/hgext/largefiles/reposetup.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/reposetup.py	Fri Dec 28 14:13:06 2012 +0100
@@ -11,9 +11,11 @@
 import types
 import os
 
-from mercurial import context, error, manifest, match as match_, util
+from mercurial import context, error, manifest, match as match_, util, \
+    discovery
 from mercurial import node as node_
 from mercurial.i18n import _
+from mercurial import localrepo
 
 import lfcommands
 import proto
@@ -88,6 +90,9 @@
         # appropriate list in the result. Also removes standin files
         # from the listing. Revert to the original status if
         # self.lfstatus is False.
+        # XXX large file status is buggy when used on repo proxy.
+        # XXX this needs to be investigated.
+        @localrepo.unfilteredmethod
         def status(self, node1='.', node2=None, match=None, ignored=False,
                 clean=False, unknown=False, listsubrepos=False):
             listignored, listclean, listunknown = ignored, clean, unknown
@@ -153,78 +158,54 @@
                             newfiles.append(f)
                     return newfiles
 
-                # Create a function that we can use to override what is
-                # normally the ignore matcher.  We've already checked
-                # for ignored files on the first dirstate walk, and
-                # unnecessarily re-checking here causes a huge performance
-                # hit because lfdirstate only knows about largefiles
-                def _ignoreoverride(self):
-                    return False
-
                 m = copy.copy(match)
                 m._files = tostandins(m._files)
 
                 result = super(lfilesrepo, self).status(node1, node2, m,
                     ignored, clean, unknown, listsubrepos)
                 if working:
-                    try:
-                        # Any non-largefiles that were explicitly listed must be
-                        # taken out or lfdirstate.status will report an error.
-                        # The status of these files was already computed using
-                        # super's status.
-                        # Override lfdirstate's ignore matcher to not do
-                        # anything
-                        origignore = lfdirstate._ignore
-                        lfdirstate._ignore = _ignoreoverride
+
+                    def sfindirstate(f):
+                        sf = lfutil.standin(f)
+                        dirstate = self.dirstate
+                        return sf in dirstate or sf in dirstate.dirs()
 
-                        def sfindirstate(f):
-                            sf = lfutil.standin(f)
-                            dirstate = self.dirstate
-                            return sf in dirstate or sf in dirstate.dirs()
-                        match._files = [f for f in match._files
-                                        if sfindirstate(f)]
-                        # Don't waste time getting the ignored and unknown
-                        # files again; we already have them
-                        s = lfdirstate.status(match, [], False,
-                                listclean, False)
-                        (unsure, modified, added, removed, missing, unknown,
-                                ignored, clean) = s
-                        # Replace the list of ignored and unknown files with
-                        # the previously calculated lists, and strip out the
-                        # largefiles
-                        lfiles = set(lfdirstate._map)
-                        ignored = set(result[5]).difference(lfiles)
-                        unknown = set(result[4]).difference(lfiles)
-                        if parentworking:
-                            for lfile in unsure:
-                                standin = lfutil.standin(lfile)
-                                if standin not in ctx1:
-                                    # from second parent
-                                    modified.append(lfile)
-                                elif ctx1[standin].data().strip() \
-                                        != lfutil.hashfile(self.wjoin(lfile)):
+                    match._files = [f for f in match._files
+                                    if sfindirstate(f)]
+                    # Don't waste time getting the ignored and unknown
+                    # files from lfdirstate
+                    s = lfdirstate.status(match, [], False,
+                            listclean, False)
+                    (unsure, modified, added, removed, missing, _unknown,
+                            _ignored, clean) = s
+                    if parentworking:
+                        for lfile in unsure:
+                            standin = lfutil.standin(lfile)
+                            if standin not in ctx1:
+                                # from second parent
+                                modified.append(lfile)
+                            elif ctx1[standin].data().strip() \
+                                    != lfutil.hashfile(self.wjoin(lfile)):
+                                modified.append(lfile)
+                            else:
+                                clean.append(lfile)
+                                lfdirstate.normal(lfile)
+                    else:
+                        tocheck = unsure + modified + added + clean
+                        modified, added, clean = [], [], []
+
+                        for lfile in tocheck:
+                            standin = lfutil.standin(lfile)
+                            if inctx(standin, ctx1):
+                                if ctx1[standin].data().strip() != \
+                                        lfutil.hashfile(self.wjoin(lfile)):
                                     modified.append(lfile)
                                 else:
                                     clean.append(lfile)
-                                    lfdirstate.normal(lfile)
-                        else:
-                            tocheck = unsure + modified + added + clean
-                            modified, added, clean = [], [], []
+                            else:
+                                added.append(lfile)
 
-                            for lfile in tocheck:
-                                standin = lfutil.standin(lfile)
-                                if inctx(standin, ctx1):
-                                    if ctx1[standin].data().strip() != \
-                                            lfutil.hashfile(self.wjoin(lfile)):
-                                        modified.append(lfile)
-                                    else:
-                                        clean.append(lfile)
-                                else:
-                                    added.append(lfile)
-                    finally:
-                        # Replace the original ignore function
-                        lfdirstate._ignore = origignore
-
+                    # Standins no longer found in lfdirstate have been removed
                     for standin in ctx1.manifest():
                         if not lfutil.isstandin(standin):
                             continue
@@ -239,20 +220,17 @@
 
                     # Largefiles are not really removed when they're
                     # still in the normal dirstate. Likewise, normal
-                    # files are not really removed if it's still in
+                    # files are not really removed if they are still in
                     # lfdirstate. This happens in merges where files
                     # change type.
                     removed = [f for f in removed if f not in self.dirstate]
                     result[2] = [f for f in result[2] if f not in lfdirstate]
 
+                    lfiles = set(lfdirstate._map)
                     # Unknown files
-                    unknown = set(unknown).difference(ignored)
-                    result[4] = [f for f in unknown
-                                 if (self.dirstate[f] == '?' and
-                                     not lfutil.isstandin(f))]
-                    # Ignored files were calculated earlier by the dirstate,
-                    # and we already stripped out the largefiles from the list
-                    result[5] = ignored
+                    result[4] = set(result[4]).difference(lfiles)
+                    # Ignored files
+                    result[5] = set(result[5]).difference(lfiles)
                     # combine normal files and largefiles
                     normals = [[fn for fn in filelist
                                 if not lfutil.isstandin(fn)]
@@ -361,7 +339,7 @@
                 # Case 2: user calls commit with specified patterns: refresh
                 # any matching big files.
                 smatcher = lfutil.composestandinmatcher(self, match)
-                standins = lfutil.dirstatewalk(self.dirstate, smatcher)
+                standins = self.dirstate.walk(smatcher, [], False, False)
 
                 # No matching big files: get out of the way and pass control to
                 # the usual commit() method.
@@ -427,10 +405,11 @@
                 wlock.release()
 
         def push(self, remote, force=False, revs=None, newbranch=False):
-            o = lfutil.findoutgoing(self, remote, force)
-            if o:
+            outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
+                                                    force=force)
+            if outgoing.missing:
                 toupload = set()
-                o = self.changelog.nodesbetween(o, revs)[0]
+                o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
                 for n in o:
                     parents = [p for p in self.changelog.parents(n)
                                if p != node_.nullid]
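
The push() override above drops the old lfutil.findoutgoing helper and instead asks discovery.findcommonoutgoing for an outgoing object, testing its missing list. A minimal sketch of that pattern, assuming repo and remote are the local and remote repository objects used in the hunk (haslocalonlychangesets is a hypothetical helper name, not extension code):

from mercurial import discovery

def haslocalonlychangesets(repo, remote, force=False):
    # findcommonoutgoing returns an outgoing object; its 'missing' attribute
    # lists the changeset nodes present locally but unknown to the remote
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return bool(outgoing.missing)

outgoing.missing is what push() above then narrows to the requested revs via changelog.nodesbetween().
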
--- a/hgext/largefiles/uisetup.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/largefiles/uisetup.py	Fri Dec 28 14:13:06 2012 +0100
@@ -59,6 +59,11 @@
                      _('verify largefile contents not just existence'))]
     entry[1].extend(verifyopt)
 
+    entry = extensions.wrapcommand(commands.table, 'debugstate',
+                                   overrides.overridedebugstate)
+    debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
+    entry[1].extend(debugstateopt)
+
     entry = extensions.wrapcommand(commands.table, 'outgoing',
         overrides.overrideoutgoing)
     outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
--- a/hgext/mq.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/mq.py	Fri Dec 28 14:13:06 2012 +0100
@@ -63,7 +63,7 @@
 from mercurial.node import bin, hex, short, nullid, nullrev
 from mercurial.lock import release
 from mercurial import commands, cmdutil, hg, scmutil, util, revset
-from mercurial import repair, extensions, error, phases, bookmarks
+from mercurial import repair, extensions, error, phases
 from mercurial import patch as patchmod
 import os, re, errno, shutil
 
@@ -275,6 +275,7 @@
     It should be used instead of repo.commit inside the mq source for operation
     creating new changeset.
     """
+    repo = repo.unfiltered()
     if phase is None:
         if repo.ui.configbool('mq', 'secret', False):
             phase = phases.secret
@@ -826,7 +827,11 @@
             if r:
                 r[None].forget(patches)
             for p in patches:
-                os.unlink(self.join(p))
+                try:
+                    os.unlink(self.join(p))
+                except OSError, inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
 
         qfinished = []
         if numrevs:
@@ -1146,7 +1151,7 @@
                 return matches[0]
             if self.series and self.applied:
                 if s == 'qtip':
-                    return self.series[self.seriesend(True)-1]
+                    return self.series[self.seriesend(True) - 1]
                 if s == 'qbase':
                     return self.series[0]
             return None
@@ -1324,11 +1329,7 @@
                 # created while patching
                 for f in all_files:
                     if f not in repo.dirstate:
-                        try:
-                            util.unlinkpath(repo.wjoin(f))
-                        except OSError, inst:
-                            if inst.errno != errno.ENOENT:
-                                raise
+                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                 self.ui.warn(_('done\n'))
                 raise
 
@@ -1437,11 +1438,7 @@
                 self.backup(repo, tobackup)
 
                 for f in a:
-                    try:
-                        util.unlinkpath(repo.wjoin(f))
-                    except OSError, e:
-                        if e.errno != errno.ENOENT:
-                            raise
+                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                     repo.dirstate.drop(f)
                 for f in m + r:
                     fctx = ctx[f]
@@ -1625,7 +1622,7 @@
                 # if the patch excludes a modified file, mark that
                 # file with mtime=0 so status can see it.
                 mm = []
-                for i in xrange(len(m)-1, -1, -1):
+                for i in xrange(len(m) - 1, -1, -1):
                     if not matchfn(m[i]):
                         mm.append(m[i])
                         del m[i]
@@ -1675,9 +1672,10 @@
                     patchf.write(chunk)
                 patchf.close()
 
+                marks = repo._bookmarks
                 for bm in bmlist:
-                    repo._bookmarks[bm] = n
-                bookmarks.write(repo)
+                    marks[bm] = n
+                marks.write()
 
                 self.applied.append(statusentry(n, patchfn))
             except: # re-raises
@@ -2999,7 +2997,7 @@
             revs.update(set(rsrevs))
         if not revs:
             del marks[mark]
-            repo._writebookmarks(mark)
+            marks.write()
             ui.write(_("bookmark '%s' deleted\n") % mark)
 
     if not revs:
@@ -3049,7 +3047,7 @@
 
     if opts.get('bookmark'):
         del marks[mark]
-        repo._writebookmarks(marks)
+        marks.write()
         ui.write(_("bookmark '%s' deleted\n") % mark)
 
     repo.mq.strip(repo, revs, backup=backup, update=update,
@@ -3435,7 +3433,7 @@
                             outapplied.pop()
                 # looking for pushed and shared changeset
                 for node in outapplied:
-                    if repo[node].phase() < phases.secret:
+                    if self[node].phase() < phases.secret:
                         raise util.Abort(_('source has mq patches applied'))
                 # no non-secret patches pushed
             super(mqrepo, self).checkpush(force, revs)
@@ -3451,7 +3449,8 @@
             mqtags = [(patch.node, patch.name) for patch in q.applied]
 
             try:
-                self.changelog.rev(mqtags[-1][0])
+                # for now ignore filtering business
+                self.unfiltered().changelog.rev(mqtags[-1][0])
             except error.LookupError:
                 self.ui.warn(_('mq status file refers to unknown node %s\n')
                              % short(mqtags[-1][0]))
@@ -3470,7 +3469,7 @@
 
             return result
 
-        def _branchtags(self, partial, lrev):
+        def _cacheabletip(self):
             q = self.mq
             cl = self.changelog
             qbase = None
@@ -3481,29 +3480,14 @@
             else:
                 qbasenode = q.applied[0].node
                 try:
-                    qbase = cl.rev(qbasenode)
+                    qbase = self.unfiltered().changelog.rev(qbasenode)
                 except error.LookupError:
                     self.ui.warn(_('mq status file refers to unknown node %s\n')
                                  % short(qbasenode))
-            if qbase is None:
-                return super(mqrepo, self)._branchtags(partial, lrev)
-
-            start = lrev + 1
-            if start < qbase:
-                # update the cache (excluding the patches) and save it
-                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
-                self._updatebranchcache(partial, ctxgen)
-                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
-                start = qbase
-            # if start = qbase, the cache is as updated as it should be.
-            # if start > qbase, the cache includes (part of) the patches.
-            # we might as well use it, but we won't save it.
-
-            # update the cache up to the tip
-            ctxgen = (self[r] for r in xrange(start, len(cl)))
-            self._updatebranchcache(partial, ctxgen)
-
-            return partial
+            ret = super(mqrepo, self)._cacheabletip()
+            if qbase is not None:
+                ret = min(qbase - 1, ret)
+            return ret
 
     if repo.local():
         repo.__class__ = mqrepo
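
Two of the hunks above replace an open-coded try/except around util.unlinkpath with util.unlinkpath(..., ignoremissing=True). A rough sketch of the behaviour that flag is relied on for, under the assumption it matches the code it replaces (unlinkignoremissing is a hypothetical stand-in, not util's implementation):

import errno, os

def unlinkignoremissing(path):
    # tolerate a file that is already gone, re-raise any other failure
    try:
        os.unlink(path)
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
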
--- a/hgext/patchbomb.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/patchbomb.py	Fri Dec 28 14:13:06 2012 +0100
@@ -474,11 +474,11 @@
 
     if opts.get('diffstat') or opts.get('confirm'):
         ui.write(_('\nFinal summary:\n\n'))
-        ui.write('From: %s\n' % sender)
+        ui.write(('From: %s\n' % sender))
         for addr in showaddrs:
             ui.write('%s\n' % addr)
         for m, subj, ds in msgs:
-            ui.write('Subject: %s\n' % subj)
+            ui.write(('Subject: %s\n' % subj))
             if ds:
                 ui.write(ds)
         ui.write('\n')
--- a/hgext/rebase.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/rebase.py	Fri Dec 28 14:13:06 2012 +0100
@@ -214,8 +214,8 @@
             else:
                 originalwd, target, state = result
                 if collapsef:
-                    targetancestors = set(repo.changelog.ancestors([target]))
-                    targetancestors.add(target)
+                    targetancestors = repo.changelog.ancestors([target],
+                                                               inclusive=True)
                     external = checkexternal(repo, state, targetancestors)
 
         if keepbranchesf:
@@ -233,8 +233,7 @@
 
         # Rebase
         if not targetancestors:
-            targetancestors = set(repo.changelog.ancestors([target]))
-            targetancestors.add(target)
+            targetancestors = repo.changelog.ancestors([target], inclusive=True)
 
         # Keep track of the current bookmarks in order to reset them later
         currentbookmarks = repo._bookmarks.copy()
@@ -479,13 +478,14 @@
 
 def updatebookmarks(repo, nstate, originalbookmarks, **opts):
     'Move bookmarks to their correct changesets'
+    marks = repo._bookmarks
     for k, v in originalbookmarks.iteritems():
         if v in nstate:
             if nstate[v] != nullmerge:
                 # update the bookmarks for revs that have moved
-                repo._bookmarks[k] = nstate[v]
+                marks[k] = nstate[v]
 
-    bookmarks.write(repo)
+    marks.write()
 
 def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
                                                                 external):
@@ -655,9 +655,12 @@
     #
     # The actual abort is handled by `defineparents`
     if len(root.parents()) <= 1:
-        # (strict) ancestors of <root> not ancestors of <dest>
-        detachset = repo.revs('::%d - ::%d - %d', root, commonbase, root)
+        # ancestors of <root> not ancestors of <dest>
+        detachset = repo.changelog.findmissingrevs([commonbase.rev()],
+                                                   [root.rev()])
         state.update(dict.fromkeys(detachset, nullmerge))
+        # detachset can have root, and we definitely want to rebase that
+        state[root.rev()] = nullrev
     return repo['.'].rev(), dest.rev(), state
 
 def clearrebased(ui, repo, state, collapsedas=None):
--- a/hgext/transplant.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/transplant.py	Fri Dec 28 14:13:06 2012 +0100
@@ -94,7 +94,8 @@
             parentrev = repo.changelog.rev(parent)
         if hasnode(repo, node):
             rev = repo.changelog.rev(node)
-            reachable = repo.changelog.incancestors([parentrev], rev)
+            reachable = repo.changelog.ancestors([parentrev], rev,
+                                                 inclusive=True)
             if rev in reachable:
                 return True
         for t in self.transplants.get(node):
@@ -103,7 +104,8 @@
                 self.transplants.remove(t)
                 return False
             lnoderev = repo.changelog.rev(t.lnode)
-            if lnoderev in repo.changelog.incancestors([parentrev], lnoderev):
+            if lnoderev in repo.changelog.ancestors([parentrev], lnoderev,
+                                                    inclusive=True):
                 return True
         return False
 
--- a/hgext/win32text.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/hgext/win32text.py	Fri Dec 28 14:13:06 2012 +0100
@@ -121,7 +121,7 @@
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
     tip = repo['tip']
-    for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
+    for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
--- a/mercurial/ancestor.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/ancestor.py	Fri Dec 28 14:13:06 2012 +0100
@@ -5,7 +5,8 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import heapq
+import heapq, util
+from node import nullrev
 
 def ancestor(a, b, pfunc):
     """
@@ -89,3 +90,175 @@
                 gx = x.next()
     except StopIteration:
         return None
+
+def missingancestors(revs, bases, pfunc):
+    """Return all the ancestors of revs that are not ancestors of bases.
+
+    This may include elements from revs.
+
+    Equivalent to the revset (::revs - ::bases). Revs are returned in
+    revision number order, which is a topological order.
+
+    revs and bases should both be iterables. pfunc must return a list of
+    parent revs for a given rev.
+    """
+
+    revsvisit = set(revs)
+    basesvisit = set(bases)
+    if not revsvisit:
+        return []
+    if not basesvisit:
+        basesvisit.add(nullrev)
+    start = max(max(revsvisit), max(basesvisit))
+    bothvisit = revsvisit.intersection(basesvisit)
+    revsvisit.difference_update(bothvisit)
+    basesvisit.difference_update(bothvisit)
+    # At this point, we hold the invariants that:
+    # - revsvisit is the set of nodes we know are an ancestor of at least one
+    #   of the nodes in revs
+    # - basesvisit is the same for bases
+    # - bothvisit is the set of nodes we know are ancestors of at least one of
+    #   the nodes in revs and one of the nodes in bases
+    # - a node may be in none or one, but not more, of revsvisit, basesvisit
+    #   and bothvisit at any given time
+    # Now we walk down in reverse topo order, adding parents of nodes already
+    # visited to the sets while maintaining the invariants. When a node is
+    # found in both revsvisit and basesvisit, it is removed from them and
+    # added to bothvisit instead. When revsvisit becomes empty, there are no
+    # more ancestors of revs that aren't also ancestors of bases, so exit.
+
+    missing = []
+    for curr in xrange(start, nullrev, -1):
+        if not revsvisit:
+            break
+
+        if curr in bothvisit:
+            bothvisit.remove(curr)
+            # curr's parents might have made it into revsvisit or basesvisit
+            # through another path
+            for p in pfunc(curr):
+                revsvisit.discard(p)
+                basesvisit.discard(p)
+                bothvisit.add(p)
+            continue
+
+        # curr will never be in both revsvisit and basesvisit, since if it
+        # were it'd have been pushed to bothvisit
+        if curr in revsvisit:
+            missing.append(curr)
+            thisvisit = revsvisit
+            othervisit = basesvisit
+        elif curr in basesvisit:
+            thisvisit = basesvisit
+            othervisit = revsvisit
+        else:
+            # not an ancestor of revs or bases: ignore
+            continue
+
+        thisvisit.remove(curr)
+        for p in pfunc(curr):
+            if p == nullrev:
+                pass
+            elif p in othervisit or p in bothvisit:
+                # p is implicitly in thisvisit. This means p is or should be
+                # in bothvisit
+                revsvisit.discard(p)
+                basesvisit.discard(p)
+                bothvisit.add(p)
+            else:
+                # visit later
+                thisvisit.add(p)
+
+    missing.reverse()
+    return missing
+
+class lazyancestors(object):
+    def __init__(self, cl, revs, stoprev=0, inclusive=False):
+        """Create a new object generating ancestors for the given revs. Does
+        not generate revs lower than stoprev.
+
+        This is computed lazily starting from revs. The object supports
+        iteration and membership.
+
+        cl should be a changelog and revs should be an iterable. inclusive is
+        a boolean that indicates whether revs should be included. Revs lower
+        than stoprev will not be generated.
+
+        Result does not include the null revision."""
+        self._parentrevs = cl.parentrevs
+        self._initrevs = revs
+        self._stoprev = stoprev
+        self._inclusive = inclusive
+
+        # Initialize data structures for __contains__.
+        # For __contains__, we use a heap rather than a deque because
+        # (a) it minimizes the number of parentrevs calls made
+        # (b) it makes the loop termination condition obvious
+        # Python's heap is a min-heap. Multiply all values by -1 to convert it
+        # into a max-heap.
+        self._containsvisit = [-rev for rev in revs]
+        heapq.heapify(self._containsvisit)
+        if inclusive:
+            self._containsseen = set(revs)
+        else:
+            self._containsseen = set()
+
+    def __iter__(self):
+        """Generate the ancestors of _initrevs in reverse topological order.
+
+        If inclusive is False, yield a sequence of revision numbers starting
+        with the parents of each revision in revs, i.e., each revision is *not*
+        considered an ancestor of itself.  Results are in breadth-first order:
+        parents of each rev in revs, then parents of those, etc.
+
+        If inclusive is True, yield all the revs first (ignoring stoprev),
+        then yield all the ancestors of revs as when inclusive is False.
+        If an element in revs is an ancestor of a different rev it is not
+        yielded again."""
+        seen = set()
+        revs = self._initrevs
+        if self._inclusive:
+            for rev in revs:
+                yield rev
+            seen.update(revs)
+
+        parentrevs = self._parentrevs
+        stoprev = self._stoprev
+        visit = util.deque(revs)
+
+        while visit:
+            for parent in parentrevs(visit.popleft()):
+                if parent >= stoprev and parent not in seen:
+                    visit.append(parent)
+                    seen.add(parent)
+                    yield parent
+
+    def __contains__(self, target):
+        """Test whether target is an ancestor of self._initrevs."""
+        # Trying to do both __iter__ and __contains__ using the same visit
+        # heap and seen set is complex enough that it slows down both. Keep
+        # them separate.
+        seen = self._containsseen
+        if target in seen:
+            return True
+
+        parentrevs = self._parentrevs
+        visit = self._containsvisit
+        stoprev = self._stoprev
+        heappop = heapq.heappop
+        heappush = heapq.heappush
+
+        targetseen = False
+
+        while visit and -visit[0] > target and not targetseen:
+            for parent in parentrevs(-heappop(visit)):
+                if parent < stoprev or parent in seen:
+                    continue
+                # We need to make sure we push all parents into the heap so
+                # that we leave it in a consistent state for future calls.
+                heappush(visit, -parent)
+                seen.add(parent)
+                if parent == target:
+                    targetseen = True
+
+        return targetseen
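
The missingancestors() and lazyancestors primitives added above can be exercised on a toy DAG. The sketch below is illustrative only: it assumes the mercurial package is importable and uses a hypothetical fakechangelog shim that provides nothing but parentrevs().

from mercurial import ancestor

# toy DAG: 0 <- 1 <- 2 <- 4 and 1 <- 3
parents = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1], 4: [2, -1]}

def pfunc(rev):
    return parents[rev]

# revset ::4 - ::3, returned in revision number order
print ancestor.missingancestors([4], [3], pfunc)   # [2, 4]

class fakechangelog(object):
    # minimal shim: lazyancestors only needs parentrevs()
    def parentrevs(self, rev):
        return parents[rev]

lazy = ancestor.lazyancestors(fakechangelog(), [4], inclusive=True)
print list(lazy)    # [4, 2, 1, 0] - the revs themselves first, then ancestors
print 3 in lazy     # False - 3 is not an ancestor of 4

missingancestors([4], [3], pfunc) corresponds to the revset ::4 - ::3, and the lazyancestors object supports both iteration and membership tests without walking more of the graph than needed.
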
--- a/mercurial/bookmarks.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/bookmarks.py	Fri Dec 28 14:13:06 2012 +0100
@@ -7,40 +7,80 @@
 
 from mercurial.i18n import _
 from mercurial.node import hex
-from mercurial import encoding, error, util, obsolete, phases
+from mercurial import encoding, error, util, obsolete
 import errno, os
 
-def read(repo):
-    '''Parse .hg/bookmarks file and return a dictionary
+class bmstore(dict):
+    """Storage for bookmarks.
+
+    This object should do all bookmark reads and writes, so that it's
+    fairly simple to replace the storage underlying bookmarks without
+    having to clone the logic surrounding bookmarks.
+
+    This particular bmstore implementation stores bookmarks as
+    {hash}\s{name}\n (the same format as localtags) in
+    .hg/bookmarks. The mapping is stored as {name: nodeid}.
+
+    This class does NOT handle the "current" bookmark state at this
+    time.
+    """
 
-    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
-    in the .hg/bookmarks file.
-    Read the file and return a (name=>nodeid) dictionary
-    '''
-    bookmarks = {}
-    try:
-        for line in repo.opener('bookmarks'):
-            line = line.strip()
-            if not line:
-                continue
-            if ' ' not in line:
-                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
-                continue
-            sha, refspec = line.split(' ', 1)
-            refspec = encoding.tolocal(refspec)
+    def __init__(self, repo):
+        dict.__init__(self)
+        self._repo = repo
+        try:
+            for line in repo.vfs('bookmarks'):
+                line = line.strip()
+                if not line:
+                    continue
+                if ' ' not in line:
+                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
+                                 % line)
+                    continue
+                sha, refspec = line.split(' ', 1)
+                refspec = encoding.tolocal(refspec)
+                try:
+                    self[refspec] = repo.changelog.lookup(sha)
+                except LookupError:
+                    pass
+        except IOError, inst:
+            if inst.errno != errno.ENOENT:
+                raise
+
+    def write(self):
+        '''Write bookmarks
+
+        Write the given bookmark => hash dictionary to the .hg/bookmarks file
+        in a format equal to those of localtags.
+
+        We also store a backup of the previous state in undo.bookmarks that
+        can be copied back on rollback.
+        '''
+        repo = self._repo
+        if repo._bookmarkcurrent not in self:
+            setcurrent(repo, None)
+
+        wlock = repo.wlock()
+        try:
+
+            file = repo.vfs('bookmarks', 'w', atomictemp=True)
+            for name, node in self.iteritems():
+                file.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
+            file.close()
+
+            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
             try:
-                bookmarks[refspec] = repo.changelog.lookup(sha)
-            except LookupError:
+                os.utime(repo.sjoin('00changelog.i'), None)
+            except OSError:
                 pass
-    except IOError, inst:
-        if inst.errno != errno.ENOENT:
-            raise
-    return bookmarks
+
+        finally:
+            wlock.release()
 
 def readcurrent(repo):
     '''Get the current bookmark
 
-    If we use gittishsh branches we have a current bookmark that
+    If we use gittish branches we have a current bookmark that
     we are on. This function returns the name of the bookmark. It
     is stored in .hg/bookmarks.current
     '''
@@ -60,37 +100,6 @@
         file.close()
     return mark
 
-def write(repo):
-    '''Write bookmarks
-
-    Write the given bookmark => hash dictionary to the .hg/bookmarks file
-    in a format equal to those of localtags.
-
-    We also store a backup of the previous state in undo.bookmarks that
-    can be copied back on rollback.
-    '''
-    refs = repo._bookmarks
-
-    if repo._bookmarkcurrent not in refs:
-        setcurrent(repo, None)
-
-    wlock = repo.wlock()
-    try:
-
-        file = repo.opener('bookmarks', 'w', atomictemp=True)
-        for refspec, node in refs.iteritems():
-            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
-        file.close()
-
-        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
-        try:
-            os.utime(repo.sjoin('00changelog.i'), None)
-        except OSError:
-            pass
-
-    finally:
-        wlock.release()
-
 def setcurrent(repo, mark):
     '''Set the name of the bookmark that we are currently on
 
@@ -152,7 +161,7 @@
             if mark != cur:
                 del marks[mark]
     if update:
-        repo._writebookmarks(marks)
+        marks.write()
     return update
 
 def listbookmarks(repo):
@@ -179,7 +188,7 @@
             if new not in repo:
                 return False
             marks[key] = repo[new].node()
-        write(repo)
+        marks.write()
         return True
     finally:
         w.release()
@@ -188,16 +197,17 @@
     ui.debug("checking for updated bookmarks\n")
     rb = remote.listkeys('bookmarks')
     changed = False
+    localmarks = repo._bookmarks
     for k in rb.keys():
-        if k in repo._bookmarks:
-            nr, nl = rb[k], repo._bookmarks[k]
+        if k in localmarks:
+            nr, nl = rb[k], localmarks[k]
             if nr in repo:
                 cr = repo[nr]
                 cl = repo[nl]
                 if cl.rev() >= cr.rev():
                     continue
                 if validdest(repo, cl, cr):
-                    repo._bookmarks[k] = cr.node()
+                    localmarks[k] = cr.node()
                     changed = True
                     ui.status(_("updating bookmark %s\n") % k)
                 else:
@@ -208,7 +218,7 @@
                     # find a unique @ suffix
                     for x in range(1, 100):
                         n = '%s@%d' % (kd, x)
-                        if n not in repo._bookmarks:
+                        if n not in localmarks:
                             break
                     # try to use an @pathalias suffix
                     # if an @pathalias already exists, we overwrite (update) it
@@ -216,17 +226,17 @@
                         if path == u:
                             n = '%s@%s' % (kd, p)
 
-                    repo._bookmarks[n] = cr.node()
+                    localmarks[n] = cr.node()
                     changed = True
                     ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
         elif rb[k] in repo:
             # add remote bookmarks for changes we already have
-            repo._bookmarks[k] = repo[rb[k]].node()
+            localmarks[k] = repo[rb[k]].node()
             changed = True
             ui.status(_("adding remote bookmark %s\n") % k)
 
     if changed:
-        write(repo)
+        localmarks.write()
 
 def diff(ui, dst, src):
     ui.status(_("searching for changed bookmarks\n"))
@@ -246,6 +256,7 @@
 
 def validdest(repo, old, new):
     """Is the new bookmark destination a valid update from the old one"""
+    repo = repo.unfiltered()
     if old == new:
         # Old == new -> nothing to update.
         return False
@@ -263,14 +274,10 @@
         while len(validdests) != plen:
             plen = len(validdests)
             succs = set(c.node() for c in validdests)
-            for c in validdests:
-                if c.phase() > phases.public:
-                    # obsolescence marker does not apply to public changeset
-                    succs.update(obsolete.allsuccessors(repo.obsstore,
-                                                        [c.node()]))
+            mutable = [c.node() for c in validdests if c.mutable()]
+            succs.update(obsolete.allsuccessors(repo.obsstore, mutable))
             known = (n for n in succs if n in nm)
             validdests = set(repo.set('%ln::', known))
-        validdests.remove(old)
         return new in validdests
     else:
         return old.descendant(new)
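
The new bmstore class keeps the on-disk layout described in its docstring: one '<hex node> <name>' pair per line in .hg/bookmarks, held in memory as {name: nodeid}. A standalone round-trip sketch of just that format (parsebookmarks and dumpbookmarks are hypothetical helpers, not part of the class):

from mercurial import encoding
from mercurial.node import bin, hex

def parsebookmarks(data):
    # '<hex node> <bookmark name>' per line -> {name: binary node}
    marks = {}
    for line in data.splitlines():
        line = line.strip()
        if not line or ' ' not in line:
            continue
        sha, refspec = line.split(' ', 1)
        marks[encoding.tolocal(refspec)] = bin(sha)
    return marks

def dumpbookmarks(marks):
    return ''.join("%s %s\n" % (hex(node), encoding.fromlocal(name))
                   for name, node in sorted(marks.iteritems()))
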
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/branchmap.py	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,175 @@
+# branchmap.py - logic to compute, maintain and store branchmap for local repo
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import bin, hex, nullid, nullrev
+import encoding
+
+def read(repo):
+    try:
+        f = repo.opener("cache/branchheads")
+        lines = f.read().split('\n')
+        f.close()
+    except (IOError, OSError):
+        return branchcache()
+
+    try:
+        last, lrev = lines.pop(0).split(" ", 1)
+        last, lrev = bin(last), int(lrev)
+        partial = branchcache(tipnode=last, tiprev=lrev)
+        if not partial.validfor(repo):
+            # invalidate the cache
+            raise ValueError('invalidating branch cache (tip differs)')
+        for l in lines:
+            if not l:
+                continue
+            node, label = l.split(" ", 1)
+            label = encoding.tolocal(label.strip())
+            if not node in repo:
+                raise ValueError('invalidating branch cache because node '+
+                                 '%s does not exist' % node)
+            partial.setdefault(label, []).append(bin(node))
+    except KeyboardInterrupt:
+        raise
+    except Exception, inst:
+        if repo.ui.debugflag:
+            repo.ui.warn(str(inst), '\n')
+        partial = branchcache()
+    return partial
+
+
+
+def updatecache(repo):
+    repo = repo.unfiltered()  # Until we get a smarter cache management
+    cl = repo.changelog
+    partial = repo._branchcache
+
+    if partial is None or not partial.validfor(repo):
+        partial = read(repo)
+
+    catip = repo._cacheabletip()
+    # if partial.tiprev == catip: cache is already up to date
+    # if partial.tiprev >  catip: `partial` contains uncachable elements and
+    #                             cannot be written to disk
+    if partial.tiprev < catip:
+        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, catip))
+        partial.update(repo, ctxgen)
+        partial.write(repo)
+    # If the cacheable tip is lower than the actual tip, we need to update
+    # the cache up to that tip. This update (from cacheable to actual tip)
+    # is not written to disk since it's not cacheable.
+    tiprev = len(repo) - 1
+    if partial.tiprev < tiprev:
+        ctxgen = (repo[r] for r in cl.revs(partial.tiprev + 1, tiprev))
+        partial.update(repo, ctxgen)
+    repo._branchcache = partial
+
+class branchcache(dict):
+    """A dict-like object that holds the branch heads cache"""
+
+    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev):
+        super(branchcache, self).__init__(entries)
+        self.tipnode = tipnode
+        self.tiprev = tiprev
+
+    def validfor(self, repo):
+        """Is the cache content valid for a given repo?
+
+        - False when the cached tipnode is unknown or if we detect a strip.
+        - True when cache is up to date or a subset of current repo."""
+        try:
+            return self.tipnode == repo.changelog.node(self.tiprev)
+        except IndexError:
+            return False
+
+
+    def write(self, repo):
+        try:
+            f = repo.opener("cache/branchheads", "w", atomictemp=True)
+            f.write("%s %s\n" % (hex(self.tipnode), self.tiprev))
+            for label, nodes in self.iteritems():
+                for node in nodes:
+                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
+            f.close()
+        except (IOError, OSError):
+            pass
+
+    def update(self, repo, ctxgen):
+        """Given a branchhead cache, self, that may have extra nodes or be
+        missing heads, and a generator of nodes that are at least a superset
+        of the missing heads, this function updates self to be correct.
+        """
+        cl = repo.changelog
+        # collect new branch entries
+        newbranches = {}
+        for c in ctxgen:
+            newbranches.setdefault(c.branch(), []).append(c.node())
+        # if older branchheads are reachable from new ones, they aren't
+        # really branchheads. Note checking parents is insufficient:
+        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
+        for branch, newnodes in newbranches.iteritems():
+            bheads = self.setdefault(branch, [])
+            # Remove candidate heads that no longer are in the repo (e.g., as
+            # the result of a strip that just happened).  Avoid using 'node in
+            # self' here because that dives down into branchcache code somewhat
+            # recursively.
+            bheadrevs = [cl.rev(node) for node in bheads
+                         if cl.hasnode(node)]
+            newheadrevs = [cl.rev(node) for node in newnodes
+                           if cl.hasnode(node)]
+            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
+            # Remove duplicates - nodes that are in newheadrevs and are already
+            # in bheadrevs.  This can happen if you strip a node whose parent
+            # was already a head (because they're on different branches).
+            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
+
+            # Starting from tip means fewer passes over reachable.  If we know
+            # the new candidates are not ancestors of existing heads, we don't
+            # have to examine ancestors of existing heads
+            if ctxisnew:
+                iterrevs = sorted(newheadrevs)
+            else:
+                iterrevs = list(bheadrevs)
+
+            # This loop prunes out two kinds of heads - heads that are
+            # superseded by a head in newheadrevs, and newheadrevs that are not
+            # heads because an existing head is their descendant.
+            while iterrevs:
+                latest = iterrevs.pop()
+                if latest not in bheadrevs:
+                    continue
+                ancestors = set(cl.ancestors([latest], bheadrevs[0]))
+                if ancestors:
+                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
+            self[branch] = [cl.node(rev) for rev in bheadrevs]
+            tiprev = max(bheadrevs)
+            if tiprev > self.tiprev:
+                self.tipnode = cl.node(tiprev)
+                self.tiprev = tiprev
+
+        # There may be branches that cease to exist when the last commit in the
+        # branch was stripped.  This code filters them out.  Note that the
+        # branch that ceased to exist may not be in newbranches because
+        # newbranches is the set of candidate heads, which when you strip the
+        # last commit in a branch will be the parent branch.
+        droppednodes = []
+        for branch in self.keys():
+            nodes = [head for head in self[branch]
+                     if cl.hasnode(head)]
+            if not nodes:
+                droppednodes.extend(nodes)
+                del self[branch]
+        if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):
+
+            # the cache key is no longer valid
+            self.tipnode = nullid
+            self.tiprev = nullrev
+            for heads in self.values():
+                tiprev = max(cl.rev(node) for node in heads)
+                if tiprev > self.tiprev:
+                    self.tipnode = cl.node(tiprev)
+                    self.tiprev = tiprev
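
read() above expects .hg/cache/branchheads to start with a '<tip hex> <tip rev>' header line, followed by one '<head hex> <branch name>' line per cached head. A standalone parsing sketch of that layout, skipping the repo validation read() performs (parsebranchheads is a hypothetical helper):

from mercurial import encoding
from mercurial.node import bin

def parsebranchheads(data):
    lines = data.split('\n')
    # header: the cache key, i.e. the tip the cache was written against
    last, lrev = lines.pop(0).split(" ", 1)
    tipnode, tiprev = bin(last), int(lrev)
    heads = {}
    for l in lines:
        if not l:
            continue
        node, label = l.split(" ", 1)
        label = encoding.tolocal(label.strip())
        heads.setdefault(label, []).append(bin(node))
    return tipnode, tiprev, heads
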
--- a/mercurial/bundlerepo.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/bundlerepo.py	Fri Dec 28 14:13:06 2012 +0100
@@ -32,7 +32,9 @@
         self.bundle = bundle
         self.basemap = {}
         n = len(self)
+        self.disktiprev = n - 1
         chain = None
+        self.bundlenodes = []
         while True:
             chunkdata = bundle.deltachunk(chain)
             if not chunkdata:
@@ -48,6 +50,7 @@
             start = bundle.tell() - size
 
             link = linkmapper(cs)
+            self.bundlenodes.append(node)
             if node in self.nodemap:
                 # this can happen if two branches make the same change
                 chain = node
@@ -212,7 +215,7 @@
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def changelog(self):
         # consume the header if it exists
         self.bundle.changelogheader()
@@ -220,7 +223,7 @@
         self.manstart = self.bundle.tell()
         return c
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manifest(self):
         self.bundle.seek(self.manstart)
         # consume the header if it exists
@@ -229,12 +232,12 @@
         self.filestart = self.bundle.tell()
         return m
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def manstart(self):
         self.changelog
         return self.manstart
 
-    @util.propertycache
+    @localrepo.unfilteredpropertycache
     def filestart(self):
         self.manifest
         return self.filestart
@@ -282,9 +285,11 @@
     def getcwd(self):
         return os.getcwd() # always outside the repo
 
-    def _writebranchcache(self, branches, tip, tiprev):
-        # don't overwrite the disk cache with bundle-augmented data
-        pass
+    def _cacheabletip(self):
+        # we should not cache data from the bundle on disk
+        ret = super(bundlerepository, self)._cacheabletip()
+        return min(self.changelog.disktiprev, ret)
+
 
 def instance(ui, path, create):
     if create:
--- a/mercurial/cmdutil.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/cmdutil.py	Fri Dec 28 14:13:06 2012 +0100
@@ -10,7 +10,7 @@
 import os, sys, errno, re, tempfile
 import util, scmutil, templater, patch, error, templatekw, revlog, copies
 import match as matchmod
-import subrepo, context, repair, bookmarks, graphmod, revset, phases, obsolete
+import subrepo, context, repair, graphmod, revset, phases, obsolete
 import changelog
 import lock as lockmod
 
@@ -1759,9 +1759,10 @@
                 # Move bookmarks from old parent to amend commit
                 bms = repo.nodebookmarks(old.node())
                 if bms:
+                    marks = repo._bookmarks
                     for bm in bms:
-                        repo._bookmarks[bm] = newid
-                    bookmarks.write(repo)
+                        marks[bm] = newid
+                    marks.write()
             #commit the whole amend process
             if obsolete._enabled and newid != old.node():
                 # mark the new changeset as successor of the rewritten one
--- a/mercurial/commands.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/commands.py	Fri Dec 28 14:13:06 2012 +0100
@@ -549,6 +549,10 @@
           hg bisect --skip
           hg bisect --skip 23
 
+      - skip all revisions that do not touch directories ``foo`` or ``bar``::
+
+          hg bisect --skip '!( file("path:foo") & file("path:bar") )'
+
       - forget the current bisection::
 
           hg bisect --reset
@@ -754,7 +758,7 @@
             cmdutil.bailifchanged(repo)
             return hg.clean(repo, node)
 
-@command('bookmarks',
+@command('bookmarks|bookmark',
     [('f', 'force', False, _('force')),
     ('r', 'rev', '', _('revision'), _('REV')),
     ('d', 'delete', False, _('delete a given bookmark')),
@@ -821,7 +825,7 @@
         if mark == repo._bookmarkcurrent:
             bookmarks.setcurrent(repo, None)
         del marks[mark]
-        bookmarks.write(repo)
+        marks.write()
 
     elif rename:
         if mark is None:
@@ -834,7 +838,7 @@
         if repo._bookmarkcurrent == rename and not inactive:
             bookmarks.setcurrent(repo, mark)
         del marks[rename]
-        bookmarks.write(repo)
+        marks.write()
 
     elif mark is not None:
         mark = checkformat(mark)
@@ -848,7 +852,7 @@
             marks[mark] = cur
         if not inactive and cur == marks[mark]:
             bookmarks.setcurrent(repo, mark)
-        bookmarks.write(repo)
+        marks.write()
 
     # Same message whether trying to deactivate the current bookmark (-i
     # with no NAME) or listing bookmarks
@@ -924,7 +928,7 @@
                                        ' exists'),
                                      # i18n: "it" refers to an existing branch
                                      hint=_("use 'hg update' to switch to it"))
-            scmutil.checknewlabel(None, label, 'branch')
+            scmutil.checknewlabel(repo, label, 'branch')
             repo.dirstate.setbranch(label)
             ui.status(_('marked working directory as branch %s\n') % label)
             ui.status(_('(branches are permanent and global, '
@@ -1322,11 +1326,12 @@
         elif marks:
             ui.debug('moving bookmarks %r from %s to %s\n' %
                      (marks, old.hex(), hex(node)))
+            newmarks = repo._bookmarks
             for bm in marks:
-                repo._bookmarks[bm] = node
+                newmarks[bm] = node
                 if bm == current:
                     bookmarks.setcurrent(repo, bm)
-            bookmarks.write(repo)
+            newmarks.write()
     else:
         e = cmdutil.commiteditor
         if opts.get('force_editor'):
@@ -1513,7 +1518,7 @@
         ui.progress(_('building'), id, unit=_('revisions'), total=total)
         for type, data in dagparser.parsedag(text):
             if type == 'n':
-                ui.note('node %s\n' % str(data))
+                ui.note(('node %s\n' % str(data)))
                 id, ps = data
 
                 files = []
@@ -1574,10 +1579,10 @@
                 at = id
             elif type == 'l':
                 id, name = data
-                ui.note('tag %s\n' % name)
+                ui.note(('tag %s\n' % name))
                 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
             elif type == 'a':
-                ui.note('branch %s\n' % data)
+                ui.note(('branch %s\n' % data))
                 atbranch = data
             ui.progress(_('building'), id, unit=_('revisions'), total=total)
         tr.close()
@@ -1595,7 +1600,7 @@
     try:
         gen = changegroup.readbundle(f, bundlepath)
         if all:
-            ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")
+            ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
 
             def showchunks(named):
                 ui.write("\n%s\n" % named)
@@ -1787,11 +1792,11 @@
         d = util.parsedate(date, util.extendeddateformats)
     else:
         d = util.parsedate(date)
-    ui.write("internal: %s %s\n" % d)
-    ui.write("standard: %s\n" % util.datestr(d))
+    ui.write(("internal: %s %s\n") % d)
+    ui.write(("standard: %s\n") % util.datestr(d))
     if range:
         m = util.matchdate(range)
-        ui.write("match: %s\n" % m(d[0]))
+        ui.write(("match: %s\n") % m(d[0]))
 
 @command('debugdiscovery',
     [('', 'old', None, _('use old-style discovery')),
@@ -1821,7 +1826,7 @@
                                                                 force=True)
             common = set(common)
             if not opts.get('nonheads'):
-                ui.write("unpruned common: %s\n" % " ".join([short(n)
+                ui.write(("unpruned common: %s\n") % " ".join([short(n)
                                                             for n in common]))
                 dag = dagutil.revlogdag(repo.changelog)
                 all = dag.ancestorset(dag.internalizeall(common))
@@ -1831,11 +1836,11 @@
         common = set(common)
         rheads = set(hds)
         lheads = set(repo.heads())
-        ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
+        ui.write(("common heads: %s\n") % " ".join([short(n) for n in common]))
         if lheads <= common:
-            ui.write("local is subset\n")
+            ui.write(("local is subset\n"))
         elif rheads <= common:
-            ui.write("remote is subset\n")
+            ui.write(("remote is subset\n"))
 
     serverlogs = opts.get('serverlog')
     if serverlogs:
@@ -1879,9 +1884,9 @@
 def debugfsinfo(ui, path = "."):
     """show information detected about current filesystem"""
     util.writefile('.debugfsinfo', '')
-    ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
-    ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
-    ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
+    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
+    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
+    ui.write(('case-sensitive: %s\n') % (util.checkcase('.debugfsinfo')
                                 and 'yes' or 'no'))
     os.unlink('.debugfsinfo')
 
@@ -1979,7 +1984,7 @@
             r = filelog
     if not r:
         r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
-    ui.write("digraph G {\n")
+    ui.write(("digraph G {\n"))
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
@@ -2325,52 +2330,54 @@
     def pcfmt(value, total):
         return (value, 100 * float(value) / total)
 
-    ui.write('format : %d\n' % format)
-    ui.write('flags  : %s\n' % ', '.join(flags))
+    ui.write(('format : %d\n') % format)
+    ui.write(('flags  : %s\n') % ', '.join(flags))
 
     ui.write('\n')
     fmt = pcfmtstr(totalsize)
     fmt2 = dfmtstr(totalsize)
-    ui.write('revisions     : ' + fmt2 % numrevs)
-    ui.write('    merges    : ' + fmt % pcfmt(nummerges, numrevs))
-    ui.write('    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
-    ui.write('revisions     : ' + fmt2 % numrevs)
-    ui.write('    full      : ' + fmt % pcfmt(numfull, numrevs))
-    ui.write('    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
-    ui.write('revision size : ' + fmt2 % totalsize)
-    ui.write('    full      : ' + fmt % pcfmt(fulltotal, totalsize))
-    ui.write('    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
+    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
+    ui.write(('revision size : ') + fmt2 % totalsize)
+    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
     ui.write('\n')
     fmt = dfmtstr(max(avgchainlen, compratio))
-    ui.write('avg chain length  : ' + fmt % avgchainlen)
-    ui.write('compression ratio : ' + fmt % compratio)
+    ui.write(('avg chain length  : ') + fmt % avgchainlen)
+    ui.write(('compression ratio : ') + fmt % compratio)
 
     if format > 0:
         ui.write('\n')
-        ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
+        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                  % tuple(datasize))
-    ui.write('full revision size (min/max/avg)     : %d / %d / %d\n'
+    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
              % tuple(fullsize))
-    ui.write('delta size (min/max/avg)             : %d / %d / %d\n'
+    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
              % tuple(deltasize))
 
     if numdeltas > 0:
         ui.write('\n')
         fmt = pcfmtstr(numdeltas)
         fmt2 = pcfmtstr(numdeltas, 4)
-        ui.write('deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas))
+        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
         if numprev > 0:
-            ui.write('    where prev = p1  : ' + fmt2 % pcfmt(nump1prev,
+            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                               numprev))
-            ui.write('    where prev = p2  : ' + fmt2 % pcfmt(nump2prev,
+            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                               numprev))
-            ui.write('    other            : ' + fmt2 % pcfmt(numoprev,
+            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                               numprev))
         if gdelta:
-            ui.write('deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas))
-            ui.write('deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas))
-            ui.write('deltas against other : ' + fmt % pcfmt(numother,
+            ui.write(('deltas against p1    : ')
+                     + fmt % pcfmt(nump1, numdeltas))
+            ui.write(('deltas against p2    : ')
+                     + fmt % pcfmt(nump2, numdeltas))
+            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                              numdeltas))
 
 @command('debugrevspec', [], ('REVSPEC'))
@@ -2448,9 +2455,63 @@
 def debugsub(ui, repo, rev=None):
     ctx = scmutil.revsingle(repo, rev, None)
     for k, v in sorted(ctx.substate.items()):
-        ui.write('path %s\n' % k)
-        ui.write(' source   %s\n' % v[0])
-        ui.write(' revision %s\n' % v[1])
+        ui.write(('path %s\n') % k)
+        ui.write((' source   %s\n') % v[0])
+        ui.write((' revision %s\n') % v[1])
+
+@command('debugsuccessorssets',
+    [],
+    _('[REV]'))
+def debugsuccessorssets(ui, repo, *revs):
+    """show set of successors for revision
+
+    A successors set of changeset A is a consistent group of revisions that
+    succeed A. It contains non-obsolete changesets only.
+
+    In most cases a changeset A has a single successors set containing a single
+    successor (changeset A replaced by A').
+
+    A changeset that is made obsolete with no successors is called "pruned".
+    Such changesets have no successors sets at all.
+
+    A changeset that has been "split" will have a successors set containing
+    more than one successor.
+
+    A changeset that has been rewritten in multiple different ways is called
+    "divergent". Such changesets have multiple successor sets (each of which
+    may also be split, i.e. have multiple successors).
+
+    Results are displayed as follows::
+
+        <rev1>
+            <successors-1A>
+        <rev2>
+            <successors-2A>
+            <successors-2B1> <successors-2B2> <successors-2B3>
+
+    Here rev2 has two possible (i.e. divergent) successors sets. The first
+    holds one element, whereas the second holds three (i.e. the changeset has
+    been split).
+    """
+    # passed to successorssets caching computation from one call to another
+    cache = {}
+    ctx2str = str
+    node2str = short
+    if ui.debug():
+        def ctx2str(ctx):
+            return ctx.hex()
+        node2str = hex
+    for rev in scmutil.revrange(repo, revs):
+        ctx = repo[rev]
+        ui.write('%s\n' % ctx2str(ctx))
+        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
+            if succsset:
+                ui.write('    ')
+                ui.write(node2str(succsset[0]))
+                for node in succsset[1:]:
+                    ui.write(' ')
+                    ui.write(node2str(node))
+            ui.write('\n')
 
 @command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
 def debugwalk(ui, repo, *pats, **opts):
@@ -2823,13 +2884,27 @@
 
     wlock = repo.wlock()
     try:
+        current = repo['.']
         for pos, ctx in enumerate(repo.set("%ld", revs)):
-            current = repo['.']
 
             ui.status(_('grafting revision %s\n') % ctx.rev())
             if opts.get('dry_run'):
                 continue
 
+            source = ctx.extra().get('source')
+            if not source:
+                source = ctx.hex()
+            extra = {'source': source}
+            user = ctx.user()
+            if opts.get('user'):
+                user = opts['user']
+            date = ctx.date()
+            if opts.get('date'):
+                date = opts['date']
+            message = ctx.description()
+            if opts.get('log'):
+                message += '\n(grafted from %s)' % ctx.hex()
+
             # we don't merge the first commit when continuing
             if not cont:
                 # perform the graft merge with p1(rev) as 'ancestor'
@@ -2858,23 +2933,12 @@
             cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
 
             # commit
-            source = ctx.extra().get('source')
-            if not source:
-                source = ctx.hex()
-            extra = {'source': source}
-            user = ctx.user()
-            if opts.get('user'):
-                user = opts['user']
-            date = ctx.date()
-            if opts.get('date'):
-                date = opts['date']
-            message = ctx.description()
-            if opts.get('log'):
-                message += '\n(grafted from %s)' % ctx.hex()
             node = repo.commit(text=message, user=user,
                         date=date, extra=extra, editor=editor)
             if node is None:
                 ui.status(_('graft for revision %s is empty\n') % ctx.rev())
+            else:
+                current = repo[node]
     finally:
         wlock.release()
 
@@ -4207,6 +4271,9 @@
 
     Returns 0 on success.
     """
+
+    fm = ui.formatter('manifest', opts)
+
     if opts.get('all'):
         if rev or node:
             raise util.Abort(_("can't specify a revision with --all"))
@@ -4224,7 +4291,9 @@
         finally:
             lock.release()
         for f in res:
-            ui.write("%s\n" % f)
+            fm.startitem()
+            fm.write("path", '%s\n', f)
+        fm.end()
         return
 
     if rev and node:
@@ -4233,14 +4302,17 @@
     if not node:
         node = rev
 
-    decor = {'l':'644 @ ', 'x':'755 * ', '':'644   '}
+    char = {'l': '@', 'x': '*', '': ''}
+    mode = {'l': '644', 'x': '755', '': '644'}
     ctx = scmutil.revsingle(repo, node)
+    mf = ctx.manifest()
     for f in ctx:
-        if ui.debugflag:
-            ui.write("%40s " % hex(ctx.manifest()[f]))
-        if ui.verbose:
-            ui.write(decor[ctx.flags(f)])
-        ui.write("%s\n" % f)
+        fm.startitem()
+        fl = ctx[f].flags()
+        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
+        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
+        fm.write('path', '%s\n', f)
+    fm.end()
 
 @command('^merge',
     [('f', 'force', None, _('force a merge with outstanding changes')),
@@ -4666,11 +4738,12 @@
 
     # update specified bookmarks
     if opts.get('bookmark'):
+        marks = repo._bookmarks
         for b in opts['bookmark']:
             # explicit pull overrides local bookmark if any
             ui.status(_("importing bookmark %s\n") % b)
-            repo._bookmarks[b] = repo[rb[b]].node()
-        bookmarks.write(repo)
+            marks[b] = repo[rb[b]].node()
+        marks.write()
 
     return ret
 
@@ -4861,8 +4934,7 @@
     elif after:
         list = deleted
         for f in modified + added + clean:
-            ui.warn(_('not removing %s: file still exists (use -f'
-                      ' to force removal)\n') % m.rel(f))
+            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
             ret = 1
     else:
         list = deleted + clean
@@ -4885,11 +4957,7 @@
             for f in list:
                 if f in added:
                     continue # we never unlink added files on remove
-                try:
-                    util.unlinkpath(repo.wjoin(f))
-                except OSError, inst:
-                    if inst.errno != errno.ENOENT:
-                        raise
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
         repo[None].forget(list)
     finally:
         wlock.release()
@@ -5427,17 +5495,16 @@
         copy = copies.pathcopies(repo[node1], repo[node2])
 
     fm = ui.formatter('status', opts)
-    format = '%s %s' + end
-    if opts.get('no_status'):
-        format = '%.0s%s' + end
+    fmt = '%s' + end
+    showchar = not opts.get('no_status')
 
     for state, char, files in changestates:
         if state in show:
             label = 'status.' + state
             for f in files:
                 fm.startitem()
-                fm.write("status path", format, char,
-                         repo.pathto(f, cwd), label=label)
+                fm.condwrite(showchar, 'status', '%s ', char, label=label)
+                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
                 if f in copy:
                     fm.write("copy", '  %s' + end, repo.pathto(copy[f], cwd),
                              label='status.copied')
@@ -5743,7 +5810,7 @@
         release(lock, wlock)
 
 @command('tags', [], '')
-def tags(ui, repo):
+def tags(ui, repo, **opts):
     """list repository tags
 
     This lists both regular and local tags. When the -v/--verbose
@@ -5752,27 +5819,27 @@
     Returns 0 on success.
     """
 
+    fm = ui.formatter('tags', opts)
     hexfunc = ui.debugflag and hex or short
     tagtype = ""
 
     for t, n in reversed(repo.tagslist()):
-        if ui.quiet:
-            ui.write("%s\n" % t, label='tags.normal')
-            continue
-
         hn = hexfunc(n)
-        r = "%5d:%s" % (repo.changelog.rev(n), hn)
-        rev = ui.label(r, 'log.changeset changeset.%s' % repo[n].phasestr())
-        spaces = " " * (30 - encoding.colwidth(t))
-
-        tag = ui.label(t, 'tags.normal')
-        if ui.verbose:
-            if repo.tagtype(t) == 'local':
-                tagtype = " local"
-                tag = ui.label(t, 'tags.local')
-            else:
-                tagtype = ""
-        ui.write("%s%s %s%s\n" % (tag, spaces, rev, tagtype))
+        label = 'tags.normal'
+        tagtype = ''
+        if repo.tagtype(t) == 'local':
+            label = 'tags.local'
+            tagtype = 'local'
+
+        fm.startitem()
+        fm.write('tag', '%s', t, label=label)
+        fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
+        fm.condwrite(not ui.quiet, 'rev id', fmt,
+                     repo.changelog.rev(n), hn, label=label)
+        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
+                     tagtype, label=label)
+        fm.plain('\n')
+    fm.end()
 
 @command('tip',
     [('p', 'patch', None, _('show patch')),
--- a/mercurial/context.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/context.py	Fri Dec 28 14:13:06 2012 +0100
@@ -25,8 +25,12 @@
         self._repo = repo
 
         if isinstance(changeid, int):
+            try:
+                self._node = repo.changelog.node(changeid)
+            except IndexError:
+                raise error.RepoLookupError(
+                    _("unknown revision '%s'") % changeid)
             self._rev = changeid
-            self._node = repo.changelog.node(changeid)
             return
         if isinstance(changeid, long):
             changeid = str(changeid)
@@ -95,7 +99,10 @@
 
         # lookup failed
         # check if it might have come from damaged dirstate
-        if changeid in repo.dirstate.parents():
+        #
+        # XXX we could avoid the unfiltered if we had a recognizable exception
+        # for filtered changeset access
+        if changeid in repo.unfiltered().dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(changeid))
         try:
@@ -250,6 +257,13 @@
         """
         return self.rev() in obsmod.getrevs(self._repo, 'bumped')
 
+    def divergent(self):
+        """Is a successors of a changeset with multiple possible successors set
+
+        Only non-public and non-obsolete changesets may be divergent.
+        """
+        return self.rev() in obsmod.getrevs(self._repo, 'divergent')
+
     def _fileinfo(self, path):
         if '_manifest' in self.__dict__:
             try:
@@ -352,6 +366,9 @@
     def dirs(self):
         return self._dirs
 
+    def dirty(self):
+        return False
+
 class filectx(object):
     """A filecontext object makes access to data related to a particular
        filerevision convenient."""
--- a/mercurial/copies.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/copies.py	Fri Dec 28 14:13:06 2012 +0100
@@ -145,12 +145,16 @@
 
     return cm
 
-def _backwardcopies(a, b):
-    # because the forward mapping is 1:n, we can lose renames here
-    # in particular, we find renames better than copies
+def _backwardrenames(a, b):
+    # Even though we're not taking copies into account, 1:n rename situations
+    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
+    # arbitrarily pick one of the renames.
     f = _forwardcopies(b, a)
     r = {}
     for k, v in f.iteritems():
+        # remove copies
+        if v in a:
+            continue
         r[v] = k
     return r
 
@@ -162,19 +166,25 @@
     if a == x:
         return _forwardcopies(x, y)
     if a == y:
-        return _backwardcopies(x, y)
-    return _chain(x, y, _backwardcopies(x, a), _forwardcopies(a, y))
+        return _backwardrenames(x, y)
+    return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
 
 def mergecopies(repo, c1, c2, ca):
     """
     Find moves and copies between context c1 and c2 that are relevant
     for merging.
 
-    Returns two dicts, "copy" and "diverge".
+    Returns four dicts: "copy", "movewithdir", "diverge", and
+    "renamedelete".
 
     "copy" is a mapping from destination name -> source name,
     where source is in c1 and destination is in c2 or vice-versa.
 
+    "movewithdir" is a mapping from source name -> destination name,
+    where the file at source, present in one context but not the other,
+    needs to be moved to destination by the merge process, because the
+    other context moved the directory it is in.
+
     "diverge" is a mapping of source name -> list of destination names
     for divergent renames.
 
@@ -183,16 +193,16 @@
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
-        return {}, {}, {}
+        return {}, {}, {}, {}
 
     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.p1():
-        return repo.dirstate.copies(), {}, {}
+        return repo.dirstate.copies(), {}, {}, {}
 
     limit = _findlimit(repo, c1.rev(), c2.rev())
     if limit is None:
         # no common ancestor, no copies
-        return {}, {}, {}
+        return {}, {}, {}, {}
     m1 = c1.manifest()
     m2 = c2.manifest()
     ma = ca.manifest()
@@ -206,6 +216,7 @@
 
     ctx = util.lrucachefunc(makectx)
     copy = {}
+    movewithdir = {}
     fullcopy = {}
     diverge = {}
 
@@ -311,11 +322,12 @@
                 note += "!"
             if f in renamedelete2:
                 note += "%"
-            repo.ui.debug("   %s -> %s %s\n" % (f, fullcopy[f], note))
+            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
+                                                              note))
     del diverge2
 
     if not fullcopy:
-        return copy, diverge, renamedelete
+        return copy, movewithdir, diverge, renamedelete
 
     repo.ui.debug("  checking for directory renames\n")
 
@@ -352,10 +364,11 @@
     del d1, d2, invalid
 
     if not dirmove:
-        return copy, diverge, renamedelete
+        return copy, movewithdir, diverge, renamedelete
 
     for d in dirmove:
-        repo.ui.debug("  dir %s -> %s\n" % (d, dirmove[d]))
+        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
+                      (d, dirmove[d]))
 
     # check unaccounted nonoverlapping files against directory moves
     for f in u1 + u2:
@@ -365,8 +378,9 @@
                     # new file added in a directory that was moved, move it
                     df = dirmove[d] + f[len(d):]
                     if df not in copy:
-                        copy[f] = df
-                        repo.ui.debug("  file %s -> %s\n" % (f, copy[f]))
+                        movewithdir[f] = df
+                        repo.ui.debug(("   pending file src: '%s' -> "
+                                       "dst: '%s'\n") % (f, df))
                     break
 
-    return copy, diverge, renamedelete
+    return copy, movewithdir, diverge, renamedelete
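
A rough sketch of how a caller might unpack the four mappings that mergecopies now returns; the dictionaries below are made-up sample data with hypothetical file names, not output from a real repository.

    # Hypothetical data shaped like the four return values of mergecopies();
    # every key and value is a file name.
    copy = {'b': 'a'}              # destination -> source
    movewithdir = {'d/x': 'e/x'}   # source -> destination (directory was moved)
    diverge = {'a': ['b', 'c']}    # source -> divergent rename destinations
    renamedelete = {'f': ['g']}    # renamed on one side, deleted on the other

    for dst, src in sorted(copy.items()):
        print('copied %s -> %s' % (src, dst))
    for src, dst in sorted(movewithdir.items()):
        print('move with directory %s -> %s' % (src, dst))
    for src, dsts in sorted(diverge.items()):
        print('divergent rename %s -> %s' % (src, ', '.join(dsts)))
    for src, dsts in sorted(renamedelete.items()):
        print('rename/delete %s -> %s' % (src, ', '.join(dsts)))
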
--- a/mercurial/dirstate.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/dirstate.py	Fri Dec 28 14:13:06 2012 +0100
@@ -607,7 +607,7 @@
             normalize = self._normalize
             skipstep3 = False
         else:
-            normalize = lambda x, y, z: x
+            normalize = None
 
         files = sorted(match.files())
         subrepos.sort()
@@ -628,7 +628,10 @@
 
         # step 1: find all explicit files
         for ff in files:
-            nf = normalize(normpath(ff), False, True)
+            if normalize:
+                nf = normalize(normpath(ff), False, True)
+            else:
+                nf = normpath(ff)
             if nf in results:
                 continue
 
@@ -678,7 +681,10 @@
                     continue
                 raise
             for f, kind, st in entries:
-                nf = normalize(nd and (nd + "/" + f) or f, True, True)
+                if normalize:
+                    nf = normalize(nd and (nd + "/" + f) or f, True, True)
+                else:
+                    nf = nd and (nd + "/" + f) or f
                 if nf not in results:
                     if kind == dirkind:
                         if not ignore(nf):
@@ -698,11 +704,9 @@
         # step 3: report unseen items in the dmap hash
         if not skipstep3 and not exact:
             visit = sorted([f for f in dmap if f not in results and matchfn(f)])
-            for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
-                if (not st is None and
-                    getkind(st.st_mode) not in (regkind, lnkkind)):
-                    st = None
-                results[nf] = st
+            nf = iter(visit).next
+            for st in util.statfiles([join(i) for i in visit]):
+                results[nf()] = st
         for s in subrepos:
             del results[s]
         del results['.hg']
@@ -748,13 +752,19 @@
         radd = removed.append
         dadd = deleted.append
         cadd = clean.append
+        mexact = match.exact
+        dirignore = self._dirignore
+        checkexec = self._checkexec
+        checklink = self._checklink
+        copymap = self._copymap
+        lastnormaltime = self._lastnormaltime
 
         lnkkind = stat.S_IFLNK
 
         for fn, st in self.walk(match, subrepos, listunknown,
                                 listignored).iteritems():
             if fn not in dmap:
-                if (listignored or match.exact(fn)) and self._dirignore(fn):
+                if (listignored or mexact(fn)) and dirignore(fn):
                     if listignored:
                         iadd(fn)
                 elif listunknown:
@@ -773,15 +783,15 @@
                 mtime = int(st.st_mtime)
                 if (size >= 0 and
                     ((size != st.st_size and size != st.st_size & _rangemask)
-                     or ((mode ^ st.st_mode) & 0100 and self._checkexec))
-                    and (mode & lnkkind != lnkkind or self._checklink)
+                     or ((mode ^ st.st_mode) & 0100 and checkexec))
+                    and (mode & lnkkind != lnkkind or checklink)
                     or size == -2 # other parent
-                    or fn in self._copymap):
+                    or fn in copymap):
                     madd(fn)
                 elif ((time != mtime and time != mtime & _rangemask)
-                      and (mode & lnkkind != lnkkind or self._checklink)):
+                      and (mode & lnkkind != lnkkind or checklink)):
                     ladd(fn)
-                elif mtime == self._lastnormaltime:
+                elif mtime == lastnormaltime:
                     # fn may have been changed in the same timeslot without
                     # changing its size. This can happen if we quickly do
                     # multiple commits in a single transaction.
--- a/mercurial/discovery.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/discovery.py	Fri Dec 28 14:13:06 2012 +0100
@@ -8,6 +8,7 @@
 from node import nullid, short
 from i18n import _
 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
+import branchmap
 
 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
@@ -192,9 +193,10 @@
 
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
-    newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
-                  if heads[0] is not None)
-    repo._updatebranchcache(newmap, missingctx)
+    newmap = branchmap.branchcache((branch, heads[1])
+                                 for branch, heads in headssum.iteritems()
+                                 if heads[0] is not None)
+    newmap.update(repo, missingctx)
     for branch, newheads in newmap.iteritems():
         headssum[branch][1][:] = newheads
     return headssum
@@ -205,7 +207,7 @@
     cl = repo.changelog
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
-    # (code based on _updatebranchcache)
+    # (code based on update)
     oldheads = set(h for h in remoteheads if h in cl.nodemap)
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
@@ -338,40 +340,9 @@
 
 def visibleheads(repo):
     """return the set of visible head of this repo"""
-    # XXX we want a cache on this
-    sroots = repo._phasecache.phaseroots[phases.secret]
-    if sroots or repo.obsstore:
-        # XXX very slow revset. storing heads or secret "boundary"
-        # would help.
-        revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
-
-        vheads = [ctx.node() for ctx in revset]
-        if not vheads:
-            vheads.append(nullid)
-    else:
-        vheads = repo.heads()
-    return vheads
+    return repo.filtered('unserved').heads()
 
 
 def visiblebranchmap(repo):
     """return a branchmap for the visible set"""
-    # XXX Recomputing this data on the fly is very slow.  We should build a
-    # XXX cached version while computing the standard branchmap version.
-    sroots = repo._phasecache.phaseroots[phases.secret]
-    if sroots or repo.obsstore:
-        vbranchmap = {}
-        for branch, nodes in  repo.branchmap().iteritems():
-            # search for secret heads.
-            for n in nodes:
-                if repo[n].phase() >= phases.secret:
-                    nodes = None
-                    break
-            # if secret heads were found we must compute them again
-            if nodes is None:
-                s = repo.set('heads(branch(%s) - secret() - extinct())',
-                             branch)
-                nodes = [c.node() for c in s]
-            vbranchmap[branch] = nodes
-    else:
-        vbranchmap = repo.branchmap()
-    return vbranchmap
+    return repo.filtered('unserved').branchmap()
--- a/mercurial/formatter.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/formatter.py	Fri Dec 28 14:13:06 2012 +0100
@@ -31,6 +31,10 @@
         '''do default text output while assigning data to item'''
         for k, v in zip(fields.split(), fielddata):
             self._item[k] = v
+    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
+        '''do conditional write (primarily for plain formatter)'''
+        for k, v in zip(fields.split(), fielddata):
+            self._item[k] = v
     def plain(self, text, **opts):
         '''show raw text for non-templated mode'''
         pass
@@ -51,6 +55,10 @@
         pass
     def write(self, fields, deftext, *fielddata, **opts):
         self._ui.write(deftext % fielddata, **opts)
+    def condwrite(self, cond, fields, deftext, *fielddata, **opts):
+        '''do conditional write'''
+        if cond:
+            self._ui.write(deftext % fielddata, **opts)
     def plain(self, text, **opts):
         self._ui.write(text, **opts)
     def end(self):
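
A minimal standalone sketch of the condwrite contract added above (toy class, not the Mercurial formatters): the plain formatter emits the default text only when the condition holds, whereas the base formatter records the field data regardless.

    import sys

    class toyplainformatter(object):
        """Illustrative stand-in for plainformatter.condwrite."""
        def condwrite(self, cond, fields, deftext, *fielddata):
            # only render the default text when the condition is true
            if cond:
                sys.stdout.write(deftext % fielddata)

    fm = toyplainformatter()
    fm.condwrite(True, 'mode type', '%s %1s ', '755', '*')   # printed
    fm.condwrite(False, 'hash', '%s ', 'deadbeef')           # skipped
    sys.stdout.write('some/path\n')
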
--- a/mercurial/help/config.txt	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/help/config.txt	Fri Dec 28 14:13:06 2012 +0100
@@ -1295,6 +1295,10 @@
     (DEPRECATED) Whether to allow .zip downloading of repository
     revisions. Default is False. This feature creates temporary files.
 
+``archivesubrepos``
+    Whether to recurse into subrepositories when archiving. Default is
+    False.
+
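
For example, a server that wants downloadable archives to include
subrepositories might use an hgrc along these lines (illustrative values)::

    [web]
    allow_archive = gz, zip
    archivesubrepos = True
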
 ``baseurl``
     Base URL to use when publishing URLs in other locations, so
     third-party tools like email notification hooks can construct
--- a/mercurial/hg.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/hg.py	Fri Dec 28 14:13:06 2012 +0100
@@ -171,11 +171,14 @@
     r = repository(ui, root)
 
     default = srcrepo.ui.config('paths', 'default')
-    if default:
-        fp = r.opener("hgrc", "w", text=True)
-        fp.write("[paths]\n")
-        fp.write("default = %s\n" % default)
-        fp.close()
+    if not default:
+        # set default to source so that subrepos can be cloned
+        default = os.path.abspath(util.urllocalpath(origsource))
+    fp = r.opener("hgrc", "w", text=True)
+    fp.write("[paths]\n")
+    fp.write("default = %s\n" % default)
+    fp.close()
+    r.ui.setconfig('paths', 'default', default)
 
     if update:
         r.ui.status(_("updating working directory\n"))
@@ -391,14 +394,15 @@
         destrepo = destpeer.local()
         if destrepo and srcpeer.capable("pushkey"):
             rb = srcpeer.listkeys('bookmarks')
+            marks = destrepo._bookmarks
             for k, n in rb.iteritems():
                 try:
                     m = destrepo.lookup(n)
-                    destrepo._bookmarks[k] = m
+                    marks[k] = m
                 except error.RepoLookupError:
                     pass
             if rb:
-                bookmarks.write(destrepo)
+                marks.write()
         elif srcrepo and destpeer.capable("pushkey"):
             for k, n in srcrepo._bookmarks.iteritems():
                 destpeer.pushkey('bookmarks', k, '', hex(n))
--- a/mercurial/hgweb/hgwebdir_mod.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/hgweb/hgwebdir_mod.py	Fri Dec 28 14:13:06 2012 +0100
@@ -304,7 +304,8 @@
                                description_sort="",
                                lastchange=d,
                                lastchange_sort=d[1]-d[0],
-                               archives=[])
+                               archives=[],
+                               isdirectory=True)
 
                     seendirs.add(name)
                     yield row
--- a/mercurial/hgweb/webcommands.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/hgweb/webcommands.py	Fri Dec 28 14:13:06 2012 +0100
@@ -14,6 +14,7 @@
 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
 from mercurial import graphmod, patch
 from mercurial import help as helpmod
+from mercurial import scmutil
 from mercurial.i18n import _
 
 # __all__ is populated with the allowed commands. Be sure to add to it if
@@ -255,6 +256,9 @@
 
 def changeset(web, req, tmpl):
     ctx = webutil.changectx(web.repo, req)
+    basectx = webutil.basechangectx(web.repo, req)
+    if basectx is None:
+        basectx = ctx.p1()
     showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
     showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark',
                                          ctx.node())
@@ -273,10 +277,10 @@
         style = req.form['style'][0]
 
     parity = paritygen(web.stripecount)
-    diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style)
+    diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, None, parity, style)
 
     parity = paritygen(web.stripecount)
-    diffstatgen = webutil.diffstatgen(ctx)
+    diffstatgen = webutil.diffstatgen(ctx, basectx)
     diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity)
 
     return tmpl('changeset',
@@ -285,6 +289,7 @@
                 node=ctx.hex(),
                 parent=webutil.parents(ctx),
                 child=webutil.children(ctx),
+                currentbaseline=basectx.hex(),
                 changesettag=showtags,
                 changesetbookmark=showbookmarks,
                 changesetbranch=showbranch,
@@ -569,7 +574,7 @@
     if 'style' in req.form:
         style = req.form['style'][0]
 
-    diffs = webutil.diffs(web.repo, tmpl, ctx, [path], parity, style)
+    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
     rename = fctx and webutil.renamelink(fctx) or []
     ctx = fctx and fctx or ctx
     return tmpl("filediff",
@@ -802,7 +807,11 @@
         headers.append(('Content-Encoding', encoding))
     req.header(headers)
     req.respond(HTTP_OK)
-    archival.archive(web.repo, req, cnode, artype, prefix=name)
+
+    ctx = webutil.changectx(web.repo, req)
+    archival.archive(web.repo, req, cnode, artype, prefix=name,
+                     matchfn=scmutil.match(ctx, []),
+                     subrepos=web.configbool("web", "archivesubrepos"))
     return []
 
 
--- a/mercurial/hgweb/webutil.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/hgweb/webutil.py	Fri Dec 28 14:13:06 2012 +0100
@@ -140,13 +140,7 @@
     path = path.lstrip('/')
     return scmutil.canonpath(repo.root, '', path)
 
-def changectx(repo, req):
-    changeid = "tip"
-    if 'node' in req.form:
-        changeid = req.form['node'][0]
-    elif 'manifest' in req.form:
-        changeid = req.form['manifest'][0]
-
+def changeidctx(repo, changeid):
     try:
         ctx = repo[changeid]
     except error.RepoError:
@@ -155,6 +149,28 @@
 
     return ctx
 
+def changectx(repo, req):
+    changeid = "tip"
+    if 'node' in req.form:
+        changeid = req.form['node'][0]
+        ipos = changeid.find(':')
+        if ipos != -1:
+            changeid = changeid[(ipos + 1):]
+    elif 'manifest' in req.form:
+        changeid = req.form['manifest'][0]
+
+    return changeidctx(repo, changeid)
+
+def basechangectx(repo, req):
+    if 'node' in req.form:
+        changeid = req.form['node'][0]
+        ipos = changeid.find(':')
+        if ipos != -1:
+            changeid = changeid[:ipos]
+            return changeidctx(repo, changeid)
+
+    return None
+
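
The changectx/basechangectx pair above lets hgweb accept a node parameter of the form "base:node" and diff against an arbitrary baseline. A rough standalone illustration of that split (made-up identifiers, not hgweb code):

    def splitbaseline(nodeparam):
        # 'base:rev' -> (base, rev); a bare 'rev' has no explicit baseline
        ipos = nodeparam.find(':')
        if ipos != -1:
            return nodeparam[:ipos], nodeparam[ipos + 1:]
        return None, nodeparam

    print(splitbaseline('1a2b3c:4d5e6f'))  # ('1a2b3c', '4d5e6f')
    print(splitbaseline('tip'))            # (None, 'tip')
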
 def filectx(repo, req):
     if 'file' not in req.form:
         raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
@@ -178,7 +194,7 @@
     if len(files) > max:
         yield tmpl('fileellipses')
 
-def diffs(repo, tmpl, ctx, files, parity, style):
+def diffs(repo, tmpl, ctx, basectx, files, parity, style):
 
     def countgen():
         start = 1
@@ -209,8 +225,11 @@
         m = match.always(repo.root, repo.getcwd())
 
     diffopts = patch.diffopts(repo.ui, untrusted=True)
-    parents = ctx.parents()
-    node1 = parents and parents[0].node() or nullid
+    if basectx is None:
+        parents = ctx.parents()
+        node1 = parents and parents[0].node() or nullid
+    else:
+        node1 = basectx.node()
     node2 = ctx.node()
 
     block = []
@@ -274,10 +293,10 @@
         for oc in s.get_grouped_opcodes(n=context):
             yield tmpl('comparisonblock', lines=getblock(oc))
 
-def diffstatgen(ctx):
+def diffstatgen(ctx, basectx):
     '''Generator function that provides the diffstat data.'''
 
-    stats = patch.diffstatdata(util.iterlines(ctx.diff()))
+    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
     maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
     while True:
         yield stats, maxname, maxtotal, addtotal, removetotal, binary
--- a/mercurial/hook.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/hook.py	Fri Dec 28 14:13:06 2012 +0100
@@ -7,7 +7,7 @@
 
 from i18n import _
 import os, sys
-import extensions, util
+import extensions, util, demandimport
 
 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
     '''call python hook. hook is callable object, looked up as
@@ -35,13 +35,17 @@
                 sys.path = sys.path[:] + [modpath]
                 modname = modfile
         try:
+            demandimport.disable()
             obj = __import__(modname)
+            demandimport.enable()
         except ImportError:
             e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
             try:
                 # extensions are loaded with hgext_ prefix
                 obj = __import__("hgext_%s" % modname)
+                demandimport.enable()
             except ImportError:
+                demandimport.enable()
                 e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
                 if ui.tracebackflag:
                     ui.warn(_('exception from first failed import attempt:\n'))
--- a/mercurial/ignore.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/ignore.py	Fri Dec 28 14:13:06 2012 +0100
@@ -46,12 +46,32 @@
                 pat = line
                 break
             elif line.startswith(s+':'):
-                pat = rels + line[len(s)+1:]
+                pat = rels + line[len(s) + 1:]
                 break
         patterns.append(pat)
 
     return patterns, warnings
 
+def readpats(root, files, warn):
+    '''return a list of (ignore-file-name, list-of-patterns) pairs'''
+
+    pats = {}
+    for f in files:
+        if f in pats:
+            continue
+        try:
+            pats[f] = []
+            fp = open(f)
+            pats[f], warnings = ignorepats(fp)
+            fp.close()
+            for warning in warnings:
+                warn("%s: %s\n" % (f, warning))
+        except IOError, inst:
+            if f != files[0]:
+                warn(_("skipping unreadable ignore file '%s': %s\n") %
+                     (f, inst.strerror))
+    return [(f, pats[f]) for f in files if f in pats]
+
 def ignore(root, files, warn):
     '''return matcher covering patterns in 'files'.
 
@@ -72,22 +92,10 @@
     glob:pattern   # non-rooted glob
     pattern        # pattern of the current default type'''
 
-    pats = {}
-    for f in files:
-        try:
-            pats[f] = []
-            fp = open(f)
-            pats[f], warnings = ignorepats(fp)
-            fp.close()
-            for warning in warnings:
-                warn("%s: %s\n" % (f, warning))
-        except IOError, inst:
-            if f != files[0]:
-                warn(_("skipping unreadable ignore file '%s': %s\n") %
-                     (f, inst.strerror))
+    pats = readpats(root, files, warn)
 
     allpats = []
-    for patlist in pats.values():
+    for f, patlist in pats:
         allpats.extend(patlist)
     if not allpats:
         return util.never
@@ -96,7 +104,7 @@
         ignorefunc = match.match(root, '', [], allpats)
     except util.Abort:
         # Re-raise an exception where the src is the right file
-        for f, patlist in pats.iteritems():
+        for f, patlist in pats:
             try:
                 match.match(root, '', [], patlist)
             except util.Abort, inst:
--- a/mercurial/localrepo.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/localrepo.py	Fri Dec 28 14:13:06 2012 +0100
@@ -4,9 +4,9 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-from node import bin, hex, nullid, nullrev, short
+from node import hex, nullid, short
 from i18n import _
-import peer, changegroup, subrepo, discovery, pushkey, obsolete
+import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
@@ -15,14 +15,49 @@
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
+import branchmap
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
-class storecache(filecache):
+class repofilecache(filecache):
+    """All filecache usage on repo are done for logic that should be unfiltered
+    """
+
+    def __get__(self, repo, type=None):
+        return super(repofilecache, self).__get__(repo.unfiltered(), type)
+    def __set__(self, repo, value):
+        return super(repofilecache, self).__set__(repo.unfiltered(), value)
+    def __delete__(self, repo):
+        return super(repofilecache, self).__delete__(repo.unfiltered())
+
+class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
+class unfilteredpropertycache(propertycache):
+    """propertycache that apply to unfiltered repo only"""
+
+    def __get__(self, repo, type=None):
+        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
+
+class filteredpropertycache(propertycache):
+    """propertycache that must take filtering in account"""
+
+    def cachevalue(self, obj, value):
+        object.__setattr__(obj, self.name, value)
+
+
+def hasunfilteredcache(repo, name):
+    """check if an repo and a unfilteredproperty cached value for <name>"""
+    return name in vars(repo.unfiltered())
+
+def unfilteredmethod(orig):
+    """decorate method that always need to be run on unfiltered version"""
+    def wrapper(repo, *args, **kwargs):
+        return orig(repo.unfiltered(), *args, **kwargs)
+    return wrapper
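
A tiny self-contained sketch of the pattern the unfilteredmethod decorator implements, using toy classes rather than localrepo: the decorated method always executes against the object returned by unfiltered().

    def unfilteredmethodsketch(orig):
        # run the wrapped method on the unfiltered version of the object
        def wrapper(obj, *args, **kwargs):
            return orig(obj.unfiltered(), *args, **kwargs)
        return wrapper

    class toyrepo(object):
        def unfiltered(self):
            return self

    class toyview(object):
        def __init__(self, base):
            self._base = base
        def unfiltered(self):
            return self._base
        @unfilteredmethodsketch
        def whoami(self):
            return type(self).__name__

    print(toyview(toyrepo()).whoami())  # prints 'toyrepo', not 'toyview'
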
+
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
 
@@ -194,7 +229,6 @@
 
 
         self._branchcache = None
-        self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
@@ -205,6 +239,15 @@
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
+        # hold sets of revisions to be filtered
+        # should be cleared when something might have changed the filter value:
+        # - new changesets,
+        # - phase change,
+        # - new obsolescence marker,
+        # - working directory parent change,
+        # - bookmark changes
+        self.filteredrevcache = {}
+
     def close(self):
         pass
 
@@ -263,17 +306,28 @@
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
-    @filecache('bookmarks')
+    def unfiltered(self):
+        """Return unfiltered version of the repository
+
+        Intended to be overwritten by filtered repos."""
+        return self
+
+    def filtered(self, name):
+        """Return a filtered version of a repository"""
+        # build a new class with the mixin and the current class
+        # (possibly a subclass of the repo)
+        class proxycls(repoview.repoview, self.unfiltered().__class__):
+            pass
+        return proxycls(self, name)
+
+    @repofilecache('bookmarks')
     def _bookmarks(self):
-        return bookmarks.read(self)
+        return bookmarks.bmstore(self)
 
-    @filecache('bookmarks.current')
+    @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
-    def _writebookmarks(self, marks):
-        bookmarks.write(self)
-
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
@@ -295,7 +349,7 @@
             self.ui.warn(msg % len(list(store)))
         return store
 
-    @propertycache
+    @unfilteredpropertycache
     def hiddenrevs(self):
         """hiddenrevs: revs that should be hidden by command and tools
 
@@ -329,7 +383,7 @@
     def manifest(self):
         return manifest.manifest(self.sopener)
 
-    @filecache('dirstate')
+    @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
@@ -385,6 +439,7 @@
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
+    @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             names = (names,)
@@ -482,7 +537,7 @@
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
-    @propertycache
+    @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
@@ -594,42 +649,27 @@
                 marks.append(bookmark)
         return sorted(marks)
 
-    def _branchtags(self, partial, lrev):
-        # TODO: rename this function?
-        tiprev = len(self) - 1
-        if lrev != tiprev:
-            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
-            self._updatebranchcache(partial, ctxgen)
-            self._writebranchcache(partial, self.changelog.tip(), tiprev)
+    def _cacheabletip(self):
+        """tip-most revision stable enought to used in persistent cache
 
-        return partial
+        This function is overwritten by MQ to ensure we do not write the cache for
+        a part of the history that will likely change.
 
-    def updatebranchcache(self):
-        tip = self.changelog.tip()
-        if self._branchcache is not None and self._branchcachetip == tip:
-            return
-
-        oldtip = self._branchcachetip
-        self._branchcachetip = tip
-        if oldtip is None or oldtip not in self.changelog.nodemap:
-            partial, last, lrev = self._readbranchcache()
-        else:
-            lrev = self.changelog.rev(oldtip)
-            partial = self._branchcache
-
-        self._branchtags(partial, lrev)
-        # this private cache holds all heads (not just the branch tips)
-        self._branchcache = partial
+        Efficient handling of filtered revisions in branchcache should offer a
+        better alternative. But we are using this approach until it is ready.
+        """
+        cl = self.changelog
+        return cl.rev(cl.tip())
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         if self.changelog.filteredrevs:
             # some changeset are excluded we can't use the cache
-            branchmap = {}
-            self._updatebranchcache(branchmap, (self[r] for r in self))
-            return branchmap
+            bmap = branchmap.branchcache()
+            bmap.update(self, (self[r] for r in self))
+            return bmap
         else:
-            self.updatebranchcache()
+            branchmap.updatecache(self)
             return self._branchcache
 
 
@@ -656,109 +696,6 @@
             bt[bn] = self._branchtip(heads)
         return bt
 
-    def _readbranchcache(self):
-        partial = {}
-        try:
-            f = self.opener("cache/branchheads")
-            lines = f.read().split('\n')
-            f.close()
-        except (IOError, OSError):
-            return {}, nullid, nullrev
-
-        try:
-            last, lrev = lines.pop(0).split(" ", 1)
-            last, lrev = bin(last), int(lrev)
-            if lrev >= len(self) or self[lrev].node() != last:
-                # invalidate the cache
-                raise ValueError('invalidating branch cache (tip differs)')
-            for l in lines:
-                if not l:
-                    continue
-                node, label = l.split(" ", 1)
-                label = encoding.tolocal(label.strip())
-                if not node in self:
-                    raise ValueError('invalidating branch cache because node '+
-                                     '%s does not exist' % node)
-                partial.setdefault(label, []).append(bin(node))
-        except KeyboardInterrupt:
-            raise
-        except Exception, inst:
-            if self.ui.debugflag:
-                self.ui.warn(str(inst), '\n')
-            partial, last, lrev = {}, nullid, nullrev
-        return partial, last, lrev
-
-    def _writebranchcache(self, branches, tip, tiprev):
-        try:
-            f = self.opener("cache/branchheads", "w", atomictemp=True)
-            f.write("%s %s\n" % (hex(tip), tiprev))
-            for label, nodes in branches.iteritems():
-                for node in nodes:
-                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
-            f.close()
-        except (IOError, OSError):
-            pass
-
-    def _updatebranchcache(self, partial, ctxgen):
-        """Given a branchhead cache, partial, that may have extra nodes or be
-        missing heads, and a generator of nodes that are at least a superset of
-        heads missing, this function updates partial to be correct.
-        """
-        # collect new branch entries
-        newbranches = {}
-        for c in ctxgen:
-            newbranches.setdefault(c.branch(), []).append(c.node())
-        # if older branchheads are reachable from new ones, they aren't
-        # really branchheads. Note checking parents is insufficient:
-        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newnodes in newbranches.iteritems():
-            bheads = partial.setdefault(branch, [])
-            # Remove candidate heads that no longer are in the repo (e.g., as
-            # the result of a strip that just happened).  Avoid using 'node in
-            # self' here because that dives down into branchcache code somewhat
-            # recursively.
-            bheadrevs = [self.changelog.rev(node) for node in bheads
-                         if self.changelog.hasnode(node)]
-            newheadrevs = [self.changelog.rev(node) for node in newnodes
-                           if self.changelog.hasnode(node)]
-            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
-            # Remove duplicates - nodes that are in newheadrevs and are already
-            # in bheadrevs.  This can happen if you strip a node whose parent
-            # was already a head (because they're on different branches).
-            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
-
-            # Starting from tip means fewer passes over reachable.  If we know
-            # the new candidates are not ancestors of existing heads, we don't
-            # have to examine ancestors of existing heads
-            if ctxisnew:
-                iterrevs = sorted(newheadrevs)
-            else:
-                iterrevs = list(bheadrevs)
-
-            # This loop prunes out two kinds of heads - heads that are
-            # superseded by a head in newheadrevs, and newheadrevs that are not
-            # heads because an existing head is their descendant.
-            while iterrevs:
-                latest = iterrevs.pop()
-                if latest not in bheadrevs:
-                    continue
-                ancestors = set(self.changelog.ancestors([latest],
-                                                         bheadrevs[0]))
-                if ancestors:
-                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
-            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
-
-        # There may be branches that cease to exist when the last commit in the
-        # branch was stripped.  This code filters them out.  Note that the
-        # branch that ceased to exist may not be in newbranches because
-        # newbranches is the set of candidate heads, which when you strip the
-        # last commit in a branch will be the parent branch.
-        for branch in partial.keys():
-            nodes = [head for head in partial[branch]
-                     if self.changelog.hasnode(head)]
-            if not nodes:
-                del partial[branch]
-
     def lookup(self, key):
         return self[key].node()
 
@@ -865,11 +802,11 @@
 
         return data
 
-    @propertycache
+    @unfilteredpropertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
-    @propertycache
+    @unfilteredpropertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
@@ -964,6 +901,7 @@
         finally:
             release(lock, wlock)
 
+    @unfilteredmethod # Until we get smarter cache management
     def _rollback(self, dryrun, force):
         ui = self.ui
         try:
@@ -1034,17 +972,19 @@
         return 0
 
     def invalidatecaches(self):
-        def delcache(name):
-            try:
-                delattr(self, name)
-            except AttributeError:
-                pass
+
+        if '_tagscache' in vars(self):
+            # can't use delattr on proxy
+            del self.__dict__['_tagscache']
 
-        delcache('_tagscache')
+        self.unfiltered()._branchcache = None # in UTF-8
+        self.invalidatevolatilesets()
 
-        self._branchcache = None # in UTF-8
-        self._branchcachetip = None
+    def invalidatevolatilesets(self):
+        self.filteredrevcache.clear()
         obsolete.clearobscaches(self)
+        if 'hiddenrevs' in vars(self):
+            del self.hiddenrevs
 
     def invalidatedirstate(self):
         '''Invalidates the dirstate, causing the next call to dirstate
@@ -1055,22 +995,23 @@
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
-        if 'dirstate' in self.__dict__:
+        if hasunfilteredcache(self, 'dirstate'):
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
-            delattr(self, 'dirstate')
+            delattr(self.unfiltered(), 'dirstate')
 
     def invalidate(self):
+        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
         for k in self._filecache:
             # dirstate is invalidated separately in invalidatedirstate()
             if k == 'dirstate':
                 continue
 
             try:
-                delattr(self, k)
+                delattr(unfiltered, k)
             except AttributeError:
                 pass
         self.invalidatecaches()
@@ -1111,7 +1052,7 @@
 
         def unlock():
             self.store.write()
-            if '_phasecache' in vars(self):
+            if hasunfilteredcache(self, '_phasecache'):
                 self._phasecache.write()
             for k, ce in self._filecache.items():
                 if k == 'dirstate':
@@ -1224,6 +1165,7 @@
 
         return fparent1
 
+    @unfilteredmethod
     def commit(self, text="", user=None, date=None, match=None, force=False,
                editor=False, extra={}):
         """Add a new revision to current repository.
@@ -1394,6 +1336,7 @@
         self._afterlock(commithook)
         return ret
 
+    @unfilteredmethod
     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.
         Revision information is passed via the context argument.
@@ -1468,13 +1411,14 @@
                 # if minimal phase was 0 we don't need to retract anything
                 phases.retractboundary(self, targetphase, [n])
             tr.close()
-            self.updatebranchcache()
+            branchmap.updatecache(self)
             return n
         finally:
             if tr:
                 tr.release()
             lock.release()
 
+    @unfilteredmethod
     def destroyed(self, newheadnodes=None):
         '''Inform the repository that nodes have been destroyed.
         Intended for use by strip and rollback, so there's a common
@@ -1490,12 +1434,11 @@
         # it, Otherwise, since nodes were destroyed, the cache is stale and this
         # will be caught the next time it is read.
         if newheadnodes:
-            tiprev = len(self) - 1
             ctxgen = (self[node] for node in newheadnodes
                       if self.changelog.hasnode(node))
-            self._updatebranchcache(self._branchcache, ctxgen)
-            self._writebranchcache(self._branchcache, self.changelog.tip(),
-                                   tiprev)
+            cache = self._branchcache
+            cache.update(self, ctxgen)
+            cache.write(self)
 
         # Ensure the persistent tag cache is updated.  Doing it now
         # means that the tag cache only has to worry about destroyed
@@ -1806,6 +1749,7 @@
                         if key.startswith('dump'):
                             data = base85.b85decode(remoteobs[key])
                             self.obsstore.mergemarkers(tr, data)
+                    self.invalidatevolatilesets()
             if tr is not None:
                 tr.close()
         finally:
@@ -1841,6 +1785,7 @@
 
         if not remote.canpush():
             raise util.Abort(_("destination does not support push"))
+        unfi = self.unfiltered()
         # get local lock as we might write phase data
         locallock = self.lock()
         try:
@@ -1852,40 +1797,43 @@
             try:
                 # discovery
                 fci = discovery.findcommonincoming
-                commoninc = fci(self, remote, force=force)
+                commoninc = fci(unfi, remote, force=force)
                 common, inc, remoteheads = commoninc
                 fco = discovery.findcommonoutgoing
-                outgoing = fco(self, remote, onlyheads=revs,
+                outgoing = fco(unfi, remote, onlyheads=revs,
                                commoninc=commoninc, force=force)
 
 
                 if not outgoing.missing:
                     # nothing to push
-                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
+                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                     ret = None
                 else:
                     # something to push
                     if not force:
                         # if self.obsstore == False --> no obsolete
                         # then, save the iteration
-                        if self.obsstore:
+                        if unfi.obsstore:
                             # these messages are defined here because of the 80 char limit
                             mso = _("push includes obsolete changeset: %s!")
                             msu = _("push includes unstable changeset: %s!")
                             msb = _("push includes bumped changeset: %s!")
+                            msd = _("push includes divergent changeset: %s!")
                             # If we push and there is at least one obsolete
                             # or unstable changeset in missing, at least one
                             # of the missingheads will be obsolete or
                             # unstable. So checking heads only is ok
                             for node in outgoing.missingheads:
-                                ctx = self[node]
+                                ctx = unfi[node]
                                 if ctx.obsolete():
                                     raise util.Abort(mso % ctx)
                                 elif ctx.unstable():
                                     raise util.Abort(msu % ctx)
                                 elif ctx.bumped():
                                     raise util.Abort(msb % ctx)
-                        discovery.checkheads(self, remote, outgoing,
+                                elif ctx.divergent():
+                                    raise util.Abort(msd % ctx)
+                        discovery.checkheads(unfi, remote, outgoing,
                                              remoteheads, newbranch,
                                              bool(inc))
 
@@ -1938,7 +1886,7 @@
                     cheads = [node for node in revs if node in common]
                     # and
                     # * commonheads parents on missing
-                    revset = self.set('%ln and parents(roots(%ln))',
+                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                     cheads.extend(c.node() for c in revset)
@@ -1961,7 +1909,7 @@
                     # Get the list of all revs draft on remote by public here.
                     # XXX Beware that revset break if droots is not strictly
                     # XXX root we may want to ensure it is but it is costly
-                    outdated =  self.set('heads((%ln::%ln) and public())',
+                    outdated =  unfi.set('heads((%ln::%ln) and public())',
                                          droots, cheads)
                     for newremotehead in outdated:
                         r = remote.pushkey('phases',
@@ -1992,12 +1940,12 @@
         self.ui.debug("checking for updated bookmarks\n")
         rb = remote.listkeys('bookmarks')
         for k in rb.keys():
-            if k in self._bookmarks:
+            if k in unfi._bookmarks:
                 nr, nl = rb[k], hex(self._bookmarks[k])
-                if nr in self:
-                    cr = self[nr]
-                    cl = self[nl]
-                    if bookmarks.validdest(self, cr, cl):
+                if nr in unfi:
+                    cr = unfi[nr]
+                    cl = unfi[nl]
+                    if bookmarks.validdest(unfi, cr, cl):
                         r = remote.pushkey('bookmarks', k, nr, nl)
                         if r:
                             self.ui.status(_("updating bookmark %s\n") % k)
@@ -2033,7 +1981,7 @@
             bases = [nullid]
         csets, bases, heads = cl.nodesbetween(bases, heads)
         # We assume that all ancestors of bases are known
-        common = set(cl.ancestors([cl.rev(n) for n in bases]))
+        common = cl.ancestors([cl.rev(n) for n in bases])
         return self._changegroupsubset(common, csets, heads, source)
 
     def getlocalbundle(self, source, outgoing):
@@ -2059,8 +2007,8 @@
         """
         cl = self.changelog
         if common:
-            nm = cl.nodemap
-            common = [n for n in common if n in nm]
+            hasnode = cl.hasnode
+            common = [n for n in common if hasnode(n)]
         else:
             common = [nullid]
         if not heads:
@@ -2068,6 +2016,7 @@
         return self.getlocalbundle(source,
                                    discovery.outgoing(cl, common, heads))
 
+    @unfilteredmethod
     def _changegroupsubset(self, commonrevs, csets, heads, source):
 
         cl = self.changelog
@@ -2179,6 +2128,7 @@
         # to avoid a race we use changegroupsubset() (issue1320)
         return self.changegroupsubset(basenodes, self.heads(), source)
 
+    @unfilteredmethod
     def _changegroup(self, nodes, source):
         """Compute the changegroup of all nodes that we have that a recipient
         doesn't.  Return a chunkbuffer object whose read() method will return
@@ -2272,6 +2222,7 @@
 
         return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
 
+    @unfilteredmethod
     def addchangegroup(self, source, srctype, url, emptyok=False):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
@@ -2410,7 +2361,7 @@
             self.ui.status(_("added %d changesets"
                              " with %d changes to %d files%s\n")
                              % (changesets, revisions, files, htext))
-            obsolete.clearobscaches(self)
+            self.invalidatevolatilesets()
 
             if changesets > 0:
                 p = lambda: cl.writepending() and self.root or ""
@@ -2444,7 +2395,11 @@
             tr.close()
 
             if changesets > 0:
-                self.updatebranchcache()
+                if srctype != 'strip':
+                    # During strip, branchcache is invalid but the coming call
+                    # to `destroyed` will repair it.
+                    # In other cases we can safely update the cache on disk.
+                    branchmap.updatecache(self)
                 def runhooks():
                     # forcefully update the on-disk branch cache
                     self.ui.debug("updating the branch cache\n")
@@ -2538,12 +2493,14 @@
                 for bheads in rbranchmap.itervalues():
                     rbheads.extend(bheads)
 
-                self.branchcache = rbranchmap
                 if rbheads:
                     rtiprev = max((int(self.changelog.rev(node))
                             for node in rbheads))
-                    self._writebranchcache(self.branchcache,
-                            self[rtiprev].node(), rtiprev)
+                    cache = branchmap.branchcache(rbranchmap,
+                                                  self[rtiprev].node(),
+                                                  rtiprev)
+                    self._branchcache = cache
+                    cache.write(self)
             self.invalidate()
             return len(self.heads()) + 1
         finally:
@@ -2607,7 +2564,7 @@
             fp.write(text)
         finally:
             fp.close()
-        return self.pathto(fp.name[len(self.root)+1:])
+        return self.pathto(fp.name[len(self.root) + 1:])
 
 # used to avoid circular references so destructors work
 def aftertrans(files):
--- a/mercurial/manifest.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/manifest.py	Fri Dec 28 14:13:06 2012 +0100
@@ -117,15 +117,23 @@
         # apply the changes collected during the bisect loop to our addlist
         # return a delta suitable for addrevision
         def addlistdelta(addlist, x):
-            # start from the bottom up
-            # so changes to the offsets don't mess things up.
-            for start, end, content in reversed(x):
+            # for large addlist arrays, building a new array is cheaper
+            # than repeatedly modifying the existing one
+            currentposition = 0
+            newaddlist = array.array('c')
+
+            for start, end, content in x:
+                newaddlist += addlist[currentposition:start]
                 if content:
-                    addlist[start:end] = array.array('c', content)
-                else:
-                    del addlist[start:end]
-            return "".join(struct.pack(">lll", start, end, len(content))
+                    newaddlist += array.array('c', content)
+
+                currentposition = end
+
+            newaddlist += addlist[currentposition:]
+
+            deltatext = "".join(struct.pack(">lll", start, end, len(content))
                            + content for start, end, content in x)
+            return deltatext, newaddlist
 
         def checkforbidden(l):
             for f in l:
@@ -194,7 +202,8 @@
             if dstart is not None:
                 delta.append([dstart, dend, "".join(dline)])
             # apply the delta to the addlist, and get a delta for addrevision
-            cachedelta = (self.rev(p1), addlistdelta(addlist, delta))
+            deltatext, addlist = addlistdelta(addlist, delta)
+            cachedelta = (self.rev(p1), deltatext)
             arraytext = addlist
             text = util.buffer(arraytext)
 
--- a/mercurial/mdiff.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/mdiff.py	Fri Dec 28 14:13:06 2012 +0100
@@ -7,7 +7,7 @@
 
 from i18n import _
 import bdiff, mpatch, util
-import re, struct
+import re, struct, base85, zlib
 
 def splitnewlines(text):
     '''like str.splitlines, but only split on newlines.'''
@@ -142,20 +142,7 @@
             yield s, type
         yield s1, '='
 
-def diffline(revs, a, b, opts):
-    parts = ['diff']
-    if opts.git:
-        parts.append('--git')
-    if revs and not opts.git:
-        parts.append(' '.join(["-r %s" % rev for rev in revs]))
-    if opts.git:
-        parts.append('a/%s' % a)
-        parts.append('b/%s' % b)
-    else:
-        parts.append(a)
-    return ' '.join(parts) + '\n'
-
-def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
+def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
     def datetag(date, fn=None):
         if not opts.git and not opts.nodates:
             return '\t%s\n' % date
@@ -206,9 +193,6 @@
         if l[ln][-1] != '\n':
             l[ln] += "\n\ No newline at end of file\n"
 
-    if r:
-        l.insert(0, diffline(r, fn1, fn2, opts))
-
     return "".join(l)
 
 # creates a headerless unified diff
@@ -314,6 +298,41 @@
         for x in yieldhunk(hunk):
             yield x
 
+def b85diff(to, tn):
+    '''print base85-encoded binary diff'''
+    def fmtline(line):
+        l = len(line)
+        if l <= 26:
+            l = chr(ord('A') + l - 1)
+        else:
+            l = chr(l - 26 + ord('a') - 1)
+        return '%c%s\n' % (l, base85.b85encode(line, True))
+
+    def chunk(text, csize=52):
+        l = len(text)
+        i = 0
+        while i < l:
+            yield text[i:i + csize]
+            i += csize
+
+    if to is None:
+        to = ''
+    if tn is None:
+        tn = ''
+
+    if to == tn:
+        return ''
+
+    # TODO: deltas
+    ret = []
+    ret.append('GIT binary patch\n')
+    ret.append('literal %s\n' % len(tn))
+    for l in chunk(zlib.compress(tn)):
+        ret.append(fmtline(l))
+    ret.append('\n')
+
+    return ''.join(ret)
+
 def patchtext(bin):
     pos = 0
     t = []
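
b85diff, now hosted in mdiff.py, emits a 'GIT binary patch' literal: the new contents are zlib-compressed, cut into 52-byte chunks, and each chunk gets a one-character length prefix ('A'-'Z' for 1-26 bytes, 'a'-'z' beyond) before base85 encoding. A rough standalone sketch of that framing, using the standard library's base64.b85encode as an assumed stand-in for mercurial's base85 module:

    import base64, zlib

    def fmtline(line):
        # one payload line: length marker + base85 of the (padded) chunk
        l = len(line)
        marker = chr(ord('A') + l - 1) if l <= 26 else chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (marker,
                           base64.b85encode(line, pad=True).decode('ascii'))

    def binarypatch(newdata):
        compressed = zlib.compress(newdata)
        lines = ['GIT binary patch\n', 'literal %d\n' % len(newdata)]
        for i in range(0, len(compressed), 52):
            lines.append(fmtline(compressed[i:i + 52]))
        lines.append('\n')
        return ''.join(lines)

    print(binarypatch(b'\x00\x01\x02hello'))
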
--- a/mercurial/merge.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/merge.py	Fri Dec 28 14:13:06 2012 +0100
@@ -213,14 +213,15 @@
         repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
         action.append((f, m) + args)
 
-    action, copy = [], {}
+    action, copy, movewithdir = [], {}, {}
 
     if overwrite:
         pa = p1
     elif pa == p2: # backwards
         pa = p1.p1()
     elif pa and repo.ui.configbool("merge", "followcopies", True):
-        copy, diverge, renamedelete = copies.mergecopies(repo, p1, p2, pa)
+        ret = copies.mergecopies(repo, p1, p2, pa)
+        copy, movewithdir, diverge, renamedelete = ret
         for of, fl in diverge.iteritems():
             act("divergent renames", "dr", of, fl)
         for of, fl in renamedelete.iteritems():
@@ -233,6 +234,7 @@
 
     m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
     copied = set(copy.values())
+    copied.update(movewithdir.values())
 
     if '.hgsubstate' in m1:
         # check whether sub state is modified
@@ -259,14 +261,14 @@
                 act("versions differ", "m", f, f, f, rflags, False)
         elif f in copied: # files we'll deal with on m2 side
             pass
-        elif f in copy:
+        elif f in movewithdir: # directory rename
+            f2 = movewithdir[f]
+            act("remote renamed directory to " + f2, "d", f, None, f2,
+                m1.flags(f))
+        elif f in copy: # case 2 A,B/B/B or case 4,21 A/B/B
             f2 = copy[f]
-            if f2 not in m2: # directory rename
-                act("remote renamed directory to " + f2, "d",
-                    f, None, f2, m1.flags(f))
-            else: # case 2 A,B/B/B or case 4,21 A/B/B
-                act("local copied/moved to " + f2, "m",
-                    f, f2, f, fmerge(f, f2, f2), False)
+            act("local copied/moved to " + f2, "m", f, f2, f,
+                fmerge(f, f2, f2), False)
         elif f in ma: # clean, a different, no remote
             if n != ma[f]:
                 if repo.ui.promptchoice(
@@ -286,12 +288,13 @@
             continue
         if f in m1 or f in copied: # files already visited
             continue
-        if f in copy:
+        if f in movewithdir:
+            f2 = movewithdir[f]
+            act("local renamed directory to " + f2, "d", None, f, f2,
+                m2.flags(f))
+        elif f in copy:
             f2 = copy[f]
-            if f2 not in m1: # directory rename
-                act("local renamed directory to " + f2, "d",
-                    None, f, f2, m2.flags(f))
-            elif f2 in m2: # rename case 1, A/A,B/A
+            if f2 in m2: # rename case 1, A/A,B/A
                 act("remote copied to " + f, "m",
                     f2, f, f, fmerge(f2, f, f2), False)
             else: # case 3,20 A/B/A
@@ -379,11 +382,10 @@
             if f == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
             try:
-                util.unlinkpath(repo.wjoin(f))
+                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
             except OSError, inst:
-                if inst.errno != errno.ENOENT:
-                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
-                                 (f, inst.strerror))
+                repo.ui.warn(_("update failed to remove %s: %s!\n") %
+                             (f, inst.strerror))
             removed += 1
         elif m == "m": # merge
             if f == '.hgsubstate': # subrepo states need updating
@@ -448,6 +450,27 @@
 
     return updated, merged, removed, unresolved
 
+def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial):
+    "Calculate the actions needed to merge mctx into tctx"
+    action = []
+    folding = not util.checkcase(repo.path)
+    if folding:
+        # collision check is not needed for clean update
+        if (not branchmerge and
+            (force or not tctx.dirty(missing=True, branch=False))):
+            _checkcollision(mctx, None)
+        else:
+            _checkcollision(mctx, (tctx, ancestor))
+    if not force:
+        _checkunknown(repo, tctx, mctx)
+    if tctx.rev() is None:
+        action += _forgetremoved(tctx, mctx, branchmerge)
+    action += manifestmerge(repo, tctx, mctx,
+                            ancestor,
+                            force and not branchmerge,
+                            partial)
+    return action
+
 def recordupdates(repo, action, branchmerge):
     "record merge actions to the dirstate"
 
@@ -609,19 +632,7 @@
                 pa = p1
 
         ### calculate phase
-        action = []
-        folding = not util.checkcase(repo.path)
-        if folding:
-            # collision check is not needed for clean update
-            if (not branchmerge and
-                (force or not wc.dirty(missing=True, branch=False))):
-                _checkcollision(p2, None)
-            else:
-                _checkcollision(p2, (wc, pa))
-        if not force:
-            _checkunknown(repo, wc, p2)
-        action += _forgetremoved(wc, p2, branchmerge)
-        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
+        action = calculateupdates(repo, wc, p2, pa, branchmerge, force, partial)
 
         ### apply phase
         if not branchmerge: # just jump to the new rev
--- a/mercurial/obsolete.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/obsolete.py	Fri Dec 28 14:13:06 2012 +0100
@@ -402,6 +402,200 @@
                     seen.add(suc)
                     remaining.add(suc)
 
+def successorssets(repo, initialnode, cache=None):
+    """Return all set of successors of initial nodes
+
+    Successors set of changeset A are a group of revision that succeed A. It
+    succeed A as a consistent whole, each revision being only partial
+    replacement.  Successors set contains non-obsolete changeset only.
+
+    In most cases a changeset A have zero (changeset pruned) or a single
+    successors set that contains a single successor (changeset A replaced by
+    A')
+
+    When changeset is split, it results successors set containing more than
+    a single element. Divergent rewriting will result in multiple successors
+    sets.
+
+    They are returned as a list of tuples containing all valid successors sets.
+
+    Final successors unknown locally are considered plain prune (obsoleted
+    without successors).
+
+    The optional `cache` parameter is a dictionary that may contains
+    precomputed successors sets. It is meant to reuse the computation of
+    previous call to `successorssets` when multiple calls are made at the same
+    time. The cache dictionary is updated in place. The caller is responsible
+    for its live spawn. Code that makes multiple calls to `successorssets`
+    *must* use this cache mechanism or suffer terrible performances."""
+
+    succmarkers = repo.obsstore.successors
+
+    # Stack of nodes we search successors sets for
+    toproceed = [initialnode]
+    # set version of above list for fast loop detection
+    # element added to "toproceed" must be added here
+    stackedset = set(toproceed)
+    if cache is None:
+        cache = {}
+
+    # This while loop is the flattened version of a recursive search for
+    # successors sets
+    #
+    # def successorssets(x):
+    #    successors = directsuccessors(x)
+    #    ss = [[]]
+    #    for succ in directsuccessors(x):
+    #        # product as in itertools cartesian product
+    #        ss = product(ss, successorssets(succ))
+    #    return ss
+    #
+    # But we cannot use plain recursive calls here:
+    # - that would blow the python call stack
+    # - obsolescence markers may have cycles, and we need to handle them.
+    #
+    # The `toproceed` list acts as our call stack. Every node we search
+    # successors sets for is stacked there.
+    #
+    # The `stackedset` is a set version of this stack used to check if a node
+    # is already stacked. This check is used to detect cycles and prevent
+    # infinite loops.
+    #
+    # The successors sets of all nodes are stored in the `cache` dictionary.
+    #
+    # After this while loop ends we use the cache to return the successors sets
+    # for the node requested by the caller.
+    while toproceed:
+        # Every iteration tries to compute the successors sets of the topmost
+        # node of the stack: CURRENT.
+        #
+        # There are four possible outcomes:
+        #
+        # 1) We already know the successors sets of CURRENT:
+        #    -> mission accomplished, pop it from the stack.
+        # 2) Node is not obsolete:
+        #    -> the node is its own successors set. Add it to the cache.
+        # 3) We do not know successors set of direct successors of CURRENT:
+        #    -> We add those successors to the stack.
+        # 4) We know successors sets of all direct successors of CURRENT:
+        #    -> We can compute CURRENT successors set and add it to the
+        #       cache.
+        #
+        current = toproceed[-1]
+        if current in cache:
+            # case (1): We already know the successors sets
+            stackedset.remove(toproceed.pop())
+        elif current not in succmarkers:
+            # case (2): The node is not obsolete.
+            if current in repo:
+                # We have a valid last successor.
+                cache[current] = [(current,)]
+            else:
+                # Final obsolete version is unknown locally.
+                # Do not count that as a valid successor.
+                cache[current] = []
+        else:
+            # cases (3) and (4)
+            #
+            # We proceed in two phases. Phase 1 aims to distinguish case (3)
+            # from case (4):
+            #
+            #     For each direct successor of CURRENT, we check whether its
+            #     successors sets are known. If they are not, we stack the
+            #     unknown node and proceed to the next iteration of the while
+            #     loop. (case 3)
+            #
+            #     During this step, we may detect obsolescence cycles: a node
+            #     with unknown successors sets but already in the call stack.
+            #     In such a situation, we arbitrarily set the successors
+            #     sets of the node to nothing (node pruned) to break the
+            #     cycle.
+            #
+            #     If no break was encountered we proceed to phase 2.
+            #
+            # Phase 2 computes successors sets of CURRENT (case 4); see details
+            # in phase 2 itself.
+            #
+            # Note the two levels of iteration in each phase.
+            # - The first one handles obsolescence markers using CURRENT as
+            #   precursor (successors markers of CURRENT).
+            #
+            #   Having multiple entries here means divergence.
+            #
+            # - The second one handles successors defined in each marker.
+            #
+            #   Having none means a pruned node, multiple successors mean a
+            #   split, and a single successor is a standard replacement.
+            #
+            for mark in succmarkers[current]:
+                for suc in mark[1]:
+                    if suc not in cache:
+                        if suc in stackedset:
+                            # cycle breaking
+                            cache[suc] = []
+                        else:
+                            # case (3) If we have not computed successors sets
+                            # of one of those successors we add it to the
+                            # `toproceed` stack and stop all work for this
+                            # iteration.
+                            toproceed.append(suc)
+                            stackedset.add(suc)
+                            break
+                else:
+                    continue
+                break
+            else:
+                # case (4): we know all successors sets of all direct
+                # successors
+                #
+                # The successors set contributed by each marker depends on
+                # the successors sets of all its "successors" nodes.
+                #
+                # Each different marker is a divergence in the obsolescence
+                # history. It contributes successors sets distinct from other
+                # markers.
+                #
+                # Within a marker, a successor may have divergent successors
+                # sets. In such a case, the marker will contribute multiple
+                # divergent successors sets. If multiple successors have
+                # divergent successors sets, a cartesian product is used.
+                #
+                # At the end we post-process successors sets to remove
+                # duplicated entries and successors sets that are strict
+                # subsets of another one.
+                succssets = []
+                for mark in succmarkers[current]:
+                    # successors sets contributed by this marker
+                    markss = [[]]
+                    for suc in mark[1]:
+                        # cardinal product with previous successors
+                        productresult = []
+                        for prefix in markss:
+                            for suffix in cache[suc]:
+                                newss = list(prefix)
+                                for part in suffix:
+                                    # do not duplicate entries in the
+                                    # successors set; first entry wins.
+                                    if part not in newss:
+                                        newss.append(part)
+                                productresult.append(newss)
+                        markss = productresult
+                    succssets.extend(markss)
+                # remove duplicates and subsets
+                seen = []
+                final = []
+                candidate = sorted(((set(s), s) for s in succssets if s),
+                                   key=lambda x: len(x[1]), reverse=True)
+                for setversion, listversion in candidate:
+                    for seenset in seen:
+                        if setversion.issubset(seenset):
+                            break
+                    else:
+                        final.append(listversion)
+                        seen.append(setversion)
+                final.reverse() # put small successors sets first
+                cache[current] = final
+    return cache[initialnode]
+
 def _knownrevs(repo, nodes):
     """yield revision numbers of known nodes passed in parameters
 
@@ -426,6 +620,7 @@
     """Return the set of revision that belong to the <name> set
 
     Such access may compute the set and cache it for future use"""
+    repo = repo.unfiltered()
     if not repo.obsstore:
         return ()
     if name not in repo.obsstore.caches:
@@ -489,6 +684,28 @@
     query = '%ld - obsolete() - public()'
     return set(repo.revs(query, _knownrevs(repo, successors)))
 
+@cachefor('divergent')
+def _computedivergentset(repo):
+    """the set of rev that compete to be the final successors of some revision.
+    """
+    divergent = set()
+    obsstore = repo.obsstore
+    newermap = {}
+    for ctx in repo.set('(not public()) - obsolete()'):
+        mark = obsstore.precursors.get(ctx.node(), ())
+        toprocess = set(mark)
+        while toprocess:
+            prec = toprocess.pop()[0]
+            if prec not in newermap:
+                successorssets(repo, prec, newermap)
+            newer = [n for n in newermap[prec] if n]
+            if len(newer) > 1:
+                divergent.add(ctx.rev())
+                break
+            toprocess.update(obsstore.precursors.get(prec, ()))
+    return divergent
+
+
 def createmarkers(repo, relations, flag=0, metadata=None):
     """Add obsolete markers between changesets in a repo
 
@@ -521,6 +738,7 @@
             if nprec in nsucs:
                 raise util.Abort("changeset %s cannot obsolete itself" % prec)
             repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
+            repo.filteredrevcache.clear()
         tr.close()
     finally:
         tr.release()
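
The while loop in successorssets flattens the recursive definition quoted in its comments. For intuition only, here is a standalone recursive sketch of that definition over a plain dict of markers (precursor -> list of successor tuples); it deliberately ignores cycle breaking, locally-unknown successors, and the duplicate/subset post-processing done by the real code:

    def successorssets_sketch(node, markers):
        """markers: {node: [tuple_of_successors, ...]} (one tuple per marker)."""
        if node not in markers:          # not obsolete: it is its own successor
            return [(node,)]
        result = []
        for succs in markers[node]:      # each marker is a divergence
            markss = [()]                # sets contributed by this marker
            for succ in succs:           # cartesian product over its successors
                markss = [prefix + tail
                          for prefix in markss
                          for tail in successorssets_sketch(succ, markers)]
            result.extend(markss)
        return result

    # A was split into B and C, and B was later rewritten twice (divergence).
    markers = {'A': [('B', 'C')], 'B': [('B1',), ('B2',)]}
    print(successorssets_sketch('A', markers))  # [('B1', 'C'), ('B2', 'C')]
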
--- a/mercurial/osutil.c	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/osutil.c	Fri Dec 28 14:13:06 2012 +0100
@@ -276,6 +276,16 @@
 	return -1;
 }
 
+static PyObject *makestat(const struct stat *st)
+{
+	PyObject *stat;
+
+	stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
+	if (stat)
+		memcpy(&((struct listdir_stat *)stat)->st, st, sizeof(*st));
+	return stat;
+}
+
 static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip)
 {
 	PyObject *list, *elem, *stat, *ret = NULL;
@@ -351,10 +361,9 @@
 		}
 
 		if (keepstat) {
-			stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
+			stat = makestat(&st);
 			if (!stat)
 				goto error;
-			memcpy(&((struct listdir_stat *)stat)->st, &st, sizeof(st));
 			elem = Py_BuildValue("siN", ent->d_name, kind, stat);
 		} else
 			elem = Py_BuildValue("si", ent->d_name, kind);
@@ -380,6 +389,55 @@
 	return ret;
 }
 
+static PyObject *statfiles(PyObject *self, PyObject *args)
+{
+	PyObject *names, *stats;
+	Py_ssize_t i, count;
+
+	if (!PyArg_ParseTuple(args, "O:statfiles", &names))
+		return NULL;
+
+	count = PySequence_Length(names);
+	if (count == -1) {
+		PyErr_SetString(PyExc_TypeError, "not a sequence");
+		return NULL;
+	}
+
+	stats = PyList_New(count);
+	if (stats == NULL)
+		return NULL;
+
+	for (i = 0; i < count; i++) {
+		PyObject *stat;
+		struct stat st;
+		int ret, kind;
+		char *path;
+
+		path = PyString_AsString(PySequence_GetItem(names, i));
+		if (path == NULL) {
+			PyErr_SetString(PyExc_TypeError, "not a string");
+			goto bail;
+		}
+		ret = lstat(path, &st);
+		kind = st.st_mode & S_IFMT;
+		if (ret != -1 && (kind == S_IFREG || kind == S_IFLNK)) {
+			stat = makestat(&st);
+			if (stat == NULL)
+				goto bail;
+			PyList_SET_ITEM(stats, i, stat);
+		} else {
+			Py_INCREF(Py_None);
+			PyList_SET_ITEM(stats, i, Py_None);
+		}
+	}
+
+	return stats;
+
+bail:
+	Py_DECREF(stats);
+	return NULL;
+}
+
 #endif /* ndef _WIN32 */
 
 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
@@ -544,6 +602,10 @@
 	{"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
 	 "Open a file with POSIX-like semantics.\n"
 "On error, this function may raise either a WindowsError or an IOError."},
+#else
+	{"statfiles", (PyCFunction)statfiles, METH_VARARGS | METH_KEYWORDS,
+	 "stat a series of files or symlinks\n"
+"Returns None for non-existent entries and entries of other types.\n"},
 #endif
 #ifdef __APPLE__
 	{
--- a/mercurial/patch.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/patch.py	Fri Dec 28 14:13:06 2012 +0100
@@ -6,7 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import cStringIO, email.Parser, os, errno, re
+import cStringIO, email.Parser, os, errno, re, posixpath
 import tempfile, zlib, shutil
 
 from i18n import _
@@ -439,11 +439,7 @@
                 util.setflags(self._join(fname), False, True)
 
     def unlink(self, fname):
-        try:
-            util.unlinkpath(self._join(fname))
-        except OSError, inst:
-            if inst.errno != errno.ENOENT:
-                raise
+        util.unlinkpath(self._join(fname), ignoremissing=True)
 
     def writerej(self, fname, failed, total, lines):
         fname = fname + ".rej"
@@ -1007,7 +1003,7 @@
 
             bot = min(fuzz, bot)
             top = min(fuzz, top)
-            return old[top:len(old)-bot], new[top:len(new)-bot], top
+            return old[top:len(old) - bot], new[top:len(new) - bot], top
         return old, new, 0
 
     def fuzzit(self, fuzz, toponly):
@@ -1514,44 +1510,6 @@
     finally:
         fp.close()
 
-def b85diff(to, tn):
-    '''print base85-encoded binary diff'''
-    def gitindex(text):
-        if not text:
-            return hex(nullid)
-        l = len(text)
-        s = util.sha1('blob %d\0' % l)
-        s.update(text)
-        return s.hexdigest()
-
-    def fmtline(line):
-        l = len(line)
-        if l <= 26:
-            l = chr(ord('A') + l - 1)
-        else:
-            l = chr(l - 26 + ord('a') - 1)
-        return '%c%s\n' % (l, base85.b85encode(line, True))
-
-    def chunk(text, csize=52):
-        l = len(text)
-        i = 0
-        while i < l:
-            yield text[i:i + csize]
-            i += csize
-
-    tohash = gitindex(to)
-    tnhash = gitindex(tn)
-    if tohash == tnhash:
-        return ""
-
-    # TODO: deltas
-    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
-           (tohash, tnhash, len(tn))]
-    for l in chunk(zlib.compress(tn)):
-        ret.append(fmtline(l))
-    ret.append('\n')
-    return ''.join(ret)
-
 class GitDiffRequired(Exception):
     pass
 
@@ -1622,9 +1580,8 @@
         return []
 
     revs = None
-    if not repo.ui.quiet:
-        hexfunc = repo.ui.debugflag and hex or short
-        revs = [hexfunc(node) for node in [node1, node2] if node]
+    hexfunc = repo.ui.debugflag and hex or short
+    revs = [hexfunc(node) for node in [node1, node2] if node]
 
     copy = {}
     if opts.git or opts.upgrade:
@@ -1690,17 +1647,45 @@
     '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
     return difflabel(diff, *args, **kw)
 
-
-def _addmodehdr(header, omode, nmode):
-    if omode != nmode:
-        header.append('old mode %s\n' % omode)
-        header.append('new mode %s\n' % nmode)
-
 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
             copy, getfilectx, opts, losedatafn, prefix):
 
     def join(f):
-        return os.path.join(prefix, f)
+        return posixpath.join(prefix, f)
+
+    def addmodehdr(header, omode, nmode):
+        if omode != nmode:
+            header.append('old mode %s\n' % omode)
+            header.append('new mode %s\n' % nmode)
+
+    def addindexmeta(meta, revs):
+        if opts.git:
+            i = len(revs)
+            if i==2:
+            if i == 2:
+                meta.append('index %s..%s\n' % tuple(revs))
+            elif i == 3:
+
+    def gitindex(text):
+        if not text:
+            return hex(nullid)
+        l = len(text)
+        s = util.sha1('blob %d\0' % l)
+        s.update(text)
+        return s.hexdigest()
+
+    def diffline(a, b, revs):
+        if opts.git:
+            line = 'diff --git a/%s b/%s\n' % (a, b)
+        elif not repo.ui.quiet:
+            if revs:
+                revinfo = ' '.join(["-r %s" % rev for rev in revs])
+                line = 'diff %s %s\n' % (revinfo, a)
+            else:
+                line = 'diff %s\n' % a
+        else:
+            line = ''
+        return line
 
     date1 = util.datestr(ctx1.date())
     man1 = ctx1.manifest()
@@ -1733,7 +1718,7 @@
                         else:
                             a = copyto[f]
                         omode = gitmode[man1.flags(a)]
-                        _addmodehdr(header, omode, mode)
+                        addmodehdr(header, omode, mode)
                         if a in removed and a not in gone:
                             op = 'rename'
                             gone.add(a)
@@ -1779,22 +1764,24 @@
                 nflag = ctx2.flags(f)
                 binary = util.binary(to) or util.binary(tn)
                 if opts.git:
-                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
+                    addmodehdr(header, gitmode[oflag], gitmode[nflag])
                     if binary:
                         dodiff = 'binary'
                 elif binary or nflag != oflag:
                     losedatafn(f)
-            if opts.git:
-                header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
 
         if dodiff:
+            if opts.git or revs:
+                header.insert(0, diffline(join(a), join(b), revs))
             if dodiff == 'binary':
-                text = b85diff(to, tn)
+                text = mdiff.b85diff(to, tn)
+                if text:
+                    addindexmeta(header, [gitindex(to), gitindex(tn)])
             else:
                 text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
-                                    join(a), join(b), revs, opts=opts)
+                                    join(a), join(b), opts=opts)
             if header and (text or len(header) > 1):
                 yield ''.join(header)
             if text:
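
With the diff and index line generation now handled inside trydiff, gitindex supplies the blob ids for the 'index <old>..<new>' header. It is the standard git blob hash: SHA-1 over the header 'blob <size>\0' followed by the content. A quick standalone check (hashlib standing in for util.sha1):

    import hashlib

    def gitindex(text):
        # git blob id: sha1('blob <len>\0' + content)
        if not text:
            return '0' * 40          # stand-in for hex(nullid)
        s = hashlib.sha1(b'blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    # matches `printf 'hello\n' | git hash-object --stdin`
    print(gitindex(b'hello\n'))      # ce013625030ba8dba906f756967f9e9ca394464a
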
--- a/mercurial/phases.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/phases.py	Fri Dec 28 14:13:06 2012 +0100
@@ -104,7 +104,6 @@
 from node import nullid, nullrev, bin, hex, short
 from i18n import _
 import util, error
-import obsolete
 
 allphases = public, draft, secret = range(3)
 trackedphases = allphases[1:]
@@ -139,6 +138,7 @@
     Return (roots, dirty) where dirty is true if roots differ from
     what is being stored.
     """
+    repo = repo.unfiltered()
     dirty = False
     roots = [set() for i in allphases]
     try:
@@ -184,6 +184,7 @@
 
     def getphaserevs(self, repo, rebuild=False):
         if rebuild or self._phaserevs is None:
+            repo = repo.unfiltered()
             revs = [public] * len(repo.changelog)
             for phase in trackedphases:
                 roots = map(repo.changelog.rev, self.phaseroots[phase])
@@ -228,6 +229,7 @@
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
 
+        repo = repo.unfiltered()
         delroots = [] # set of root deleted by this path
         for phase in xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
@@ -245,12 +247,13 @@
             # declare deleted root in the target phase
             if targetphase != 0:
                 self.retractboundary(repo, targetphase, delroots)
-        obsolete.clearobscaches(repo)
+        repo.invalidatevolatilesets()
 
     def retractboundary(self, repo, targetphase, nodes):
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
 
+        repo = repo.unfiltered()
         currentroots = self.phaseroots[targetphase]
         newroots = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) < targetphase]
@@ -262,7 +265,7 @@
             ctxs = repo.set('roots(%ln::)', currentroots)
             currentroots.intersection_update(ctx.node() for ctx in ctxs)
             self._updateroots(targetphase, currentroots)
-        obsolete.clearobscaches(repo)
+        repo.invalidatevolatilesets()
 
 def advanceboundary(repo, targetphase, nodes):
     """Add nodes to a phase changing other nodes phases if necessary.
@@ -316,6 +319,7 @@
 
 def pushphase(repo, nhex, oldphasestr, newphasestr):
     """List phases root for serialization over pushkey"""
+    repo = repo.unfiltered()
     lock = repo.lock()
     try:
         currentphase = repo[nhex].phase()
@@ -340,6 +344,7 @@
 
     Accept unknown element input
     """
+    repo = repo.unfiltered()
     # build list from dictionary
     draftroots = []
     nodemap = repo.changelog.nodemap # to filter unknown nodes
@@ -367,6 +372,7 @@
 
     * `heads`: define the first subset
     * `roots`: define the second we subtract from the first"""
+    repo = repo.unfiltered()
     revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                       heads, roots, roots, heads)
     return [c.node() for c in revset]
--- a/mercurial/posix.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/posix.py	Fri Dec 28 14:13:06 2012 +0100
@@ -7,7 +7,7 @@
 
 from i18n import _
 import encoding
-import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata
+import os, sys, errno, stat, getpass, pwd, grp, socket, tempfile, unicodedata
 
 posixfile = open
 normpath = os.path.normpath
@@ -352,12 +352,18 @@
 def setsignalhandler():
     pass
 
+_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
+
 def statfiles(files):
-    'Stat each file in files and yield stat or None if file does not exist.'
+    '''Stat each file in files. Yield each stat, or None if a file does not
+    exist or has a type we don't care about.'''
     lstat = os.lstat
+    getkind = stat.S_IFMT
     for nf in files:
         try:
             st = lstat(nf)
+            if getkind(st.st_mode) not in _wantedkinds:
+                st = None
         except OSError, err:
             if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                 raise
@@ -437,9 +443,13 @@
 def makedir(path, notindexed):
     os.mkdir(path)
 
-def unlinkpath(f):
+def unlinkpath(f, ignoremissing=False):
     """unlink and remove the directory if it is empty"""
-    os.unlink(f)
+    try:
+        os.unlink(f)
+    except OSError, e:
+        if not (ignoremissing and e.errno == errno.ENOENT):
+            raise
     # try removing directories that might now be empty
     try:
         os.removedirs(os.path.dirname(f))
@@ -477,3 +487,43 @@
 
 def executablepath():
     return None # available on Windows only
+
+class unixdomainserver(socket.socket):
+    def __init__(self, join, subsystem):
+        '''Create a unix domain socket with the given prefix.'''
+        super(unixdomainserver, self).__init__(socket.AF_UNIX)
+        sockname = subsystem + '.sock'
+        self.realpath = self.path = join(sockname)
+        if os.path.islink(self.path):
+            if os.path.exists(self.path):
+                self.realpath = os.readlink(self.path)
+            else:
+                os.unlink(self.path)
+        try:
+            self.bind(self.realpath)
+        except socket.error, err:
+            if err.args[0] == 'AF_UNIX path too long':
+                tmpdir = tempfile.mkdtemp(prefix='hg-%s-' % subsystem)
+                self.realpath = os.path.join(tmpdir, sockname)
+                try:
+                    self.bind(self.realpath)
+                    os.symlink(self.realpath, self.path)
+                except (OSError, socket.error):
+                    self.cleanup()
+                    raise
+            else:
+                raise
+        self.listen(5)
+
+    def cleanup(self):
+        def okayifmissing(f, path):
+            try:
+                f(path)
+            except OSError, err:
+                if err.errno != errno.ENOENT:
+                    raise
+
+        okayifmissing(os.unlink, self.path)
+        if self.realpath != self.path:
+            okayifmissing(os.unlink, self.realpath)
+            okayifmissing(os.rmdir, os.path.dirname(self.realpath))
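
The pure-Python statfiles fallback now matches the new C implementation: anything that is not a regular file or a symlink is reported as None, exactly like a missing path. A standalone sketch of that filtering (an illustrative function, not mercurial's API):

    import os, stat, errno

    _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}

    def statfiles_sketch(paths):
        for p in paths:
            try:
                st = os.lstat(p)
                if stat.S_IFMT(st.st_mode) not in _wantedkinds:
                    st = None            # directories, sockets, ... are ignored
            except OSError as err:
                if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                    raise
                st = None                # missing files yield None too
            yield st

    # '.' is a directory and '/no/such/file' does not exist: both yield None
    print(list(statfiles_sketch(['.', '/no/such/file'])))
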
--- a/mercurial/repair.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/repair.py	Fri Dec 28 14:13:06 2012 +0100
@@ -6,7 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from mercurial import changegroup, bookmarks
+from mercurial import changegroup, branchmap
 from mercurial.node import short
 from mercurial.i18n import _
 import os
@@ -56,10 +56,11 @@
     return s
 
 def strip(ui, repo, nodelist, backup="all", topic='backup'):
+    repo = repo.unfiltered()
     # It simplifies the logic around updating the branchheads cache if we only
     # have to consider the effect of the stripped revisions and not revisions
     # missing because the cache is out-of-date.
-    repo.updatebranchcache()
+    branchmap.updatecache(repo)
 
     cl = repo.changelog
     # TODO handle undo of merge sets
@@ -111,8 +112,10 @@
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
-    newbmtarget = repo.revs('sort(heads((::%ld) - (%ld)), -rev)',
-                            tostrip, tostrip)
+
+    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
+    # is much faster
+    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget[0]].node()
     else:
@@ -181,7 +184,7 @@
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
-        bookmarks.write(repo)
+        bm.write()
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/repoview.py	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,108 @@
+# repoview.py - Filtered view of a localrepo object
+#
+# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
+#                Logilab SA        <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import copy
+import phases
+
+def computeunserved(repo):
+    """compute the set of revision that should be filtered when used a server
+
+    Secret and hidden changeset should not pretend to be here."""
+    assert not repo.changelog.filteredrevs
+    # fast path in simple case to avoid the impact of non-optimised code
+    if phases.hassecret(repo) or repo.obsstore:
+        return frozenset(repo.revs('hidden() + secret()'))
+    return ()
+
+# function to compute filtered set
+filtertable = {'unserved': computeunserved}
+
+def filteredrevs(repo, filtername):
+    """returns set of filtered revision for this filter name"""
+    if filtername not in repo.filteredrevcache:
+        func = filtertable[filtername]
+        repo.filteredrevcache[filtername] = func(repo.unfiltered())
+    return repo.filteredrevcache[filtername]
+
+class repoview(object):
+    """Provide a read/write view of a repo through a filtered changelog
+
+    This object is used to access a filtered version of a repository without
+    altering the original repository object itself. We cannot alter the
+    original object for two main reasons:
+    - It prevents the use of a repo with multiple filters at the same time,
+      in particular when multiple threads are involved.
+    - It makes the scope of the filtering harder to control.
+
+    This object behaves very much like the original repository. All
+    attribute operations are done on the original repository:
+    - An access to `repoview.someattr` actually returns `repo.someattr`,
+    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
+    - A deletion of `repoview.someattr` actually drops `someattr`
+      from `repo.__dict__`.
+
+    The only exception is the `changelog` property. It is overridden to return
+    a (surface) copy of `repo.changelog` with some revisions filtered. The
+    `filtername` attribute of the view controls the revisions that need to be
+    filtered (the fact that the changelog is copied is an implementation
+    detail).
+
+    Unlike attributes, this object intercepts all method calls. This means that
+    all methods are run on the `repoview` object with the filtered `changelog`
+    property. For this purpose the simple `repoview` class must be mixed with
+    the actual class of the repository. This ensures that the resulting
+    `repoview` object has the very same methods as the repo object. This
+    leads to the property below.
+
+        repoview.method() --> repo.__class__.method(repoview)
+
+    The inheritance has to be done dynamically because `repo` can be of any
+    subclass of `localrepo`, e.g. `bundlerepo` or `httprepo`.
+    """
+
+    def __init__(self, repo, filtername):
+        object.__setattr__(self, '_unfilteredrepo', repo)
+        object.__setattr__(self, 'filtername', filtername)
+
+    # not a propertycache on purpose; we shall implement a proper cache later
+    @property
+    def changelog(self):
+        """return a filtered version of the changelog
+
+        this changelog must not be used for writing"""
+        # some cache may be implemented later
+        cl = copy.copy(self._unfilteredrepo.changelog)
+        cl.filteredrevs = filteredrevs(self._unfilteredrepo, self.filtername)
+        return cl
+
+    def unfiltered(self):
+        """Return an unfiltered version of a repo"""
+        return self._unfilteredrepo
+
+    def filtered(self, name):
+        """Return a filtered version of a repository"""
+        if name == self.filtername:
+            return self
+        return self.unfiltered().filtered(name)
+
+    # all attribute accesses are forwarded to the proxied repo
+    def __getattr__(self, attr):
+        return getattr(self._unfilteredrepo, attr)
+
+    def __setattr__(self, attr, value):
+        return setattr(self._unfilteredrepo, attr, value)
+
+    def __delattr__(self, attr):
+        return delattr(self._unfilteredrepo, attr)
+
+    # The `requirements` attribute is initialized during __init__. But
+    # __getattr__ won't be called as it also exists on the class. We need
+    # explicit forwarding to the main repo here.
+    @property
+    def requirements(self):
+        return self._unfilteredrepo.requirements
+
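
The repoview docstring describes a proxy that must be mixed dynamically with the concrete repository class so that inherited methods run against the view and see the filtered changelog. The toy sketch below shows the general pattern with made-up classes; the type() call mirrors the dynamic mixin idea, while the real class construction happens outside this file:

    class Repo(object):
        def __init__(self):
            self.revs = [0, 1, 2, 3]
        def tip(self):                      # uses self.revs, whatever self is
            return self.revs[-1]

    class View(object):
        """Proxy that overrides one attribute and forwards the rest."""
        def __init__(self, repo, hidden):
            object.__setattr__(self, '_repo', repo)
            object.__setattr__(self, '_hidden', hidden)
        @property
        def revs(self):
            return [r for r in self._repo.revs if r not in self._hidden]
        def __getattr__(self, name):
            return getattr(self._repo, name)

    # mix the proxy with the concrete class so that methods inherited from
    # Repo run with the view as self and therefore see the filtered revs
    filteredclass = type('filteredrepo', (View, Repo), {})
    view = filteredclass(Repo(), hidden={3})
    print(view.tip())   # 2: Repo.tip ran, but saw the filtered revs
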
--- a/mercurial/revlog.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/revlog.py	Fri Dec 28 14:13:06 2012 +0100
@@ -257,11 +257,14 @@
         return iter(xrange(len(self)))
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
-        if stop is None:
-            stop = len(self)
+        step = 1
+        if stop is not None:
+            if start > stop:
+                step = -1
+            stop += step
         else:
-            stop += 1
-        return xrange(start, stop)
+            stop = len(self)
+        return xrange(start, stop, step)
 
     @util.propertycache
     def nodemap(self):
@@ -338,33 +341,14 @@
         return len(t)
     size = rawsize
 
-    def ancestors(self, revs, stoprev=0):
+    def ancestors(self, revs, stoprev=0, inclusive=False):
         """Generate the ancestors of 'revs' in reverse topological order.
         Does not generate revs lower than stoprev.
 
-        Yield a sequence of revision numbers starting with the parents
-        of each revision in revs, i.e., each revision is *not* considered
-        an ancestor of itself.  Results are in breadth-first order:
-        parents of each rev in revs, then parents of those, etc.  Result
-        does not include the null revision."""
-        visit = util.deque(revs)
-        seen = set([nullrev])
-        while visit:
-            for parent in self.parentrevs(visit.popleft()):
-                if parent < stoprev:
-                    continue
-                if parent not in seen:
-                    visit.append(parent)
-                    seen.add(parent)
-                    yield parent
+        See the documentation for ancestor.lazyancestors for more details."""
 
-    def incancestors(self, revs, stoprev=0):
-        """Identical to ancestors() except it also generates the
-        revisions, 'revs'"""
-        for rev in revs:
-            yield rev
-        for rev in self.ancestors(revs, stoprev):
-            yield rev
+        return ancestor.lazyancestors(self, revs, stoprev=stoprev,
+                                      inclusive=inclusive)
 
     def descendants(self, revs):
         """Generate the descendants of 'revs' in revision order.
@@ -429,6 +413,29 @@
         missing.sort()
         return has, [self.node(r) for r in missing]
 
+    def findmissingrevs(self, common=None, heads=None):
+        """Return the revision numbers of the ancestors of heads that
+        are not ancestors of common.
+
+        More specifically, return a list of revision numbers corresponding to
+        nodes N such that every N satisfies the following constraints:
+
+          1. N is an ancestor of some node in 'heads'
+          2. N is not an ancestor of any node in 'common'
+
+        The list is sorted by revision number, meaning it is
+        topologically sorted.
+
+        'heads' and 'common' are both lists of revision numbers.  If heads is
+        not supplied, uses all of the revlog's heads.  If common is not
+        supplied, uses nullid."""
+        if common is None:
+            common = [nullrev]
+        if heads is None:
+            heads = self.headrevs()
+
+        return ancestor.missingancestors(heads, common, self.parentrevs)
+
     def findmissing(self, common=None, heads=None):
         """Return the ancestors of heads that are not ancestors of common.
 
@@ -444,8 +451,16 @@
         'heads' and 'common' are both lists of node IDs.  If heads is
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
-        _common, missing = self.findcommonmissing(common, heads)
-        return missing
+        if common is None:
+            common = [nullid]
+        if heads is None:
+            heads = self.heads()
+
+        common = [self.rev(n) for n in common]
+        heads = [self.rev(n) for n in heads]
+
+        return [self.node(r) for r in
+                ancestor.missingancestors(heads, common, self.parentrevs)]
 
     def nodesbetween(self, roots=None, heads=None):
         """Return a topological path from 'roots' to 'heads'.
--- a/mercurial/revset.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/revset.py	Fri Dec 28 14:13:06 2012 +0100
@@ -442,6 +442,19 @@
     bumped = obsmod.getrevs(repo, 'bumped')
     return [r for r in subset if r in bumped]
 
+def bundle(repo, subset, x):
+    """``bundle()``
+    Changesets in the bundle.
+
+    Bundle must be specified by the -R option."""
+
+    try:
+        bundlenodes = repo.changelog.bundlenodes
+    except AttributeError:
+        raise util.Abort(_("no bundle provided - specify with -R"))
+    revs = set(repo[n].rev() for n in bundlenodes)
+    return [r for r in subset if r in revs]
+
 def checkstatus(repo, subset, pat, field):
     m = None
     s = []
@@ -475,8 +488,13 @@
 
 def _children(repo, narrow, parentset):
     cs = set()
+    if not parentset:
+        return cs
     pr = repo.changelog.parentrevs
+    minrev = min(parentset)
     for r in narrow:
+        if r <= minrev:
+            continue
         for p in pr(r):
             if p in parentset:
                 cs.add(r)
@@ -628,6 +646,15 @@
 
     return [r for r in subset if r in dests]
 
+def divergent(repo, subset, x):
+    """``divergent()``
+    Final successors of changesets with an alternative set of final successors.
+    """
+    # i18n: "divergent" is a keyword
+    getargs(x, 0, 0, _("divergent takes no arguments"))
+    divergent = obsmod.getrevs(repo, 'divergent')
+    return [r for r in subset if r in divergent]
+
 def draft(repo, subset, x):
     """``draft()``
     Changeset in draft phase."""
@@ -1513,6 +1540,7 @@
     "branch": branch,
     "branchpoint": branchpoint,
     "bumped": bumped,
+    "bundle": bundle,
     "children": children,
     "closed": closed,
     "contains": contains,
@@ -1522,6 +1550,7 @@
     "descendants": descendants,
     "_firstdescendants": _firstdescendants,
     "destination": destination,
+    "divergent": divergent,
     "draft": draft,
     "extinct": extinct,
     "extra": extra,
--- a/mercurial/scmutil.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/scmutil.py	Fri Dec 28 14:13:06 2012 +0100
@@ -279,37 +279,38 @@
             mode += "b" # for that other OS
 
         nlink = -1
-        dirname, basename = util.split(f)
-        # If basename is empty, then the path is malformed because it points
-        # to a directory. Let the posixfile() call below raise IOError.
-        if basename and mode not in ('r', 'rb'):
-            if atomictemp:
-                if not os.path.isdir(dirname):
-                    util.makedirs(dirname, self.createmode)
-                return util.atomictempfile(f, mode, self.createmode)
-            try:
-                if 'w' in mode:
-                    util.unlink(f)
+        if mode not in ('r', 'rb'):
+            dirname, basename = util.split(f)
+            # If basename is empty, then the path is malformed because it points
+            # to a directory. Let the posixfile() call below raise IOError.
+            if basename:
+                if atomictemp:
+                    if not os.path.isdir(dirname):
+                        util.makedirs(dirname, self.createmode)
+                    return util.atomictempfile(f, mode, self.createmode)
+                try:
+                    if 'w' in mode:
+                        util.unlink(f)
+                        nlink = 0
+                    else:
+                        # nlinks() may behave differently for files on Windows
+                        # shares if the file is open.
+                        fd = util.posixfile(f)
+                        nlink = util.nlinks(f)
+                        if nlink < 1:
+                            nlink = 2 # force mktempcopy (issue1922)
+                        fd.close()
+                except (OSError, IOError), e:
+                    if e.errno != errno.ENOENT:
+                        raise
                     nlink = 0
-                else:
-                    # nlinks() may behave differently for files on Windows
-                    # shares if the file is open.
-                    fd = util.posixfile(f)
-                    nlink = util.nlinks(f)
-                    if nlink < 1:
-                        nlink = 2 # force mktempcopy (issue1922)
-                    fd.close()
-            except (OSError, IOError), e:
-                if e.errno != errno.ENOENT:
-                    raise
-                nlink = 0
-                if not os.path.isdir(dirname):
-                    util.makedirs(dirname, self.createmode)
-            if nlink > 0:
-                if self._trustnlink is None:
-                    self._trustnlink = nlink > 1 or util.checknlink(f)
-                if nlink > 1 or not self._trustnlink:
-                    util.rename(util.mktempcopy(f), f)
+                    if not os.path.isdir(dirname):
+                        util.makedirs(dirname, self.createmode)
+                if nlink > 0:
+                    if self._trustnlink is None:
+                        self._trustnlink = nlink > 1 or util.checknlink(f)
+                    if nlink > 1 or not self._trustnlink:
+                        util.rename(util.mktempcopy(f), f)
         fp = util.posixfile(f, mode)
         if nlink == 0:
             self._fixfilemode(f)
@@ -633,13 +634,13 @@
                 start, end = spec.split(_revrangesep, 1)
                 start = revfix(repo, start, 0)
                 end = revfix(repo, end, len(repo) - 1)
-                step = start > end and -1 or 1
+                rangeiter = repo.changelog.revs(start, end)
                 if not seen and not l:
                     # by far the most common case: revs = ["-1:0"]
-                    l = range(start, end + step, step)
+                    l = list(rangeiter)
                     # defer syncing seen until next iteration
                     continue
-                newrevs = set(xrange(start, end + step, step))
+                newrevs = set(rangeiter)
                 if seen:
                     newrevs.difference_update(seen)
                     seen.update(newrevs)
--- a/mercurial/statichttprepo.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/statichttprepo.py	Fri Dec 28 14:13:06 2012 +0100
@@ -135,7 +135,6 @@
         self._tags = None
         self.nodetagscache = None
         self._branchcache = None
-        self._branchcachetip = None
         self.encodepats = None
         self.decodepats = None
 
--- a/mercurial/store.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/store.py	Fri Dec 28 14:13:06 2012 +0100
@@ -76,7 +76,7 @@
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
-    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
+    for x in range(ord("A"), ord("Z") + 1) + [ord(e)]:
         cmap[chr(x)] = e + chr(x).lower()
     dmap = {}
     for k, v in cmap.iteritems():
@@ -128,7 +128,7 @@
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
-    for x in range(ord("A"), ord("Z")+1):
+    for x in range(ord("A"), ord("Z") + 1):
         cmap[chr(x)] = chr(x).lower()
     return lambda s: "".join([cmap[c] for c in s])
 
--- a/mercurial/subrepo.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/subrepo.py	Fri Dec 28 14:13:06 2012 +0100
@@ -14,6 +14,23 @@
 
 nullstate = ('', '', 'empty')
 
+class SubrepoAbort(error.Abort):
+    """Exception class used to avoid handling a subrepo error more than once"""
+
+def annotatesubrepoerror(func):
+    def decoratedmethod(self, *args, **kargs):
+        try:
+            res = func(self, *args, **kargs)
+        except SubrepoAbort, ex:
+            # This exception has already been handled
+            raise ex
+        except error.Abort, ex:
+            errormsg = _('%s (in subrepo %s)') % (str(ex), subrelpath(self))
+            # avoid handling this exception by raising a SubrepoAbort exception
+            raise SubrepoAbort(errormsg, hint=ex.hint)
+        return res
+    return decoratedmethod
+
 def state(ctx, ui):
     """return a state dict, mapping subrepo paths configured in .hgsub
     to tuple: (source from .hgsub, revision from .hgsubstate, kind
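
annotatesubrepoerror wraps each subrepo operation so that an Abort raised inside it gets the subrepo path appended exactly once; re-raising an already-annotated SubrepoAbort unchanged keeps nested calls from stacking the suffix. The same pattern in standalone form (toy exception classes; functools.wraps added for illustration):

    import functools

    class Abort(Exception):
        pass

    class SubrepoAbort(Abort):
        """Already annotated with subrepo context; do not annotate again."""

    def annotateerror(context):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except SubrepoAbort:
                    raise                      # context already attached
                except Abort as ex:
                    raise SubrepoAbort('%s (in subrepo %s)' % (ex, context))
            return wrapper
        return decorator

    @annotateerror('libs/foo')
    def outer():
        return inner()

    @annotateerror('libs/foo')
    def inner():
        raise Abort('push failed')

    try:
        outer()
    except SubrepoAbort as ex:
        print(ex)   # push failed (in subrepo libs/foo) -- annotated only once
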
@@ -244,8 +261,7 @@
         if repo.ui.config('paths', 'default'):
             return repo.ui.config('paths', 'default')
     if abort:
-        raise util.Abort(_("default path for subrepository %s not found") %
-            reporelpath(repo))
+        raise util.Abort(_("default path for subrepository not found"))
 
 def itersubrepos(ctx1, ctx2):
     """find subrepos in ctx1 or ctx2"""
@@ -402,6 +418,7 @@
                 self._repo.ui.setconfig(s, k, v)
         self._initrepo(r, state[0], create)
 
+    @annotatesubrepoerror
     def _initrepo(self, parentrepo, source, create):
         self._repo._subparent = parentrepo
         self._repo._subsource = source
@@ -422,10 +439,12 @@
                 addpathconfig('default-push', defpushpath)
             fp.close()
 
+    @annotatesubrepoerror
     def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
         return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                            os.path.join(prefix, self._path), explicitonly)
 
+    @annotatesubrepoerror
     def status(self, rev2, **opts):
         try:
             rev1 = self._state[1]
@@ -437,6 +456,7 @@
                                % (inst, subrelpath(self)))
             return [], [], [], [], [], [], []
 
+    @annotatesubrepoerror
     def diff(self, ui, diffopts, node2, match, prefix, **opts):
         try:
             node1 = node.bin(self._state[1])
@@ -446,12 +466,13 @@
                 node2 = node.bin(node2)
             cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                    node1, node2, match,
-                                   prefix=os.path.join(prefix, self._path),
+                                   prefix=posixpath.join(prefix, self._path),
                                    listsubrepos=True, **opts)
         except error.RepoLookupError, inst:
             self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                                % (inst, subrelpath(self)))
 
+    @annotatesubrepoerror
     def archive(self, ui, archiver, prefix, match=None):
         self._get(self._state + ('hg',))
         abstractsubrepo.archive(self, ui, archiver, prefix, match)
@@ -463,6 +484,7 @@
             submatch = matchmod.narrowmatcher(subpath, match)
             s.archive(ui, archiver, os.path.join(prefix, self._path), submatch)
 
+    @annotatesubrepoerror
     def dirty(self, ignoreupdate=False):
         r = self._state[1]
         if r == '' and not ignoreupdate: # no state recorded
@@ -479,6 +501,7 @@
     def checknested(self, path):
         return self._repo._checknested(self._repo.wjoin(path))
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         # don't bother committing in the subrepo if it's only been
         # updated
@@ -490,6 +513,7 @@
             return self._repo['.'].hex() # different version checked out
         return node.hex(n)
 
+    @annotatesubrepoerror
     def remove(self):
         # we can't fully delete the repository as it may contain
         # local-only history
@@ -519,12 +543,14 @@
                 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
                                            srcurl)
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         self._get(state)
         source, revision, kind = state
         self._repo.ui.debug("getting subrepo %s\n" % self._path)
         hg.updaterepo(self._repo, revision, overwrite)
 
+    @annotatesubrepoerror
     def merge(self, state):
         self._get(state)
         cur = self._repo['.']
@@ -551,6 +577,7 @@
         else:
             mergefunc()
 
+    @annotatesubrepoerror
     def push(self, opts):
         force = opts.get('force')
         newbranch = opts.get('new_branch')
@@ -569,12 +596,15 @@
         other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
         return self._repo.push(other, force, newbranch=newbranch)
 
+    @annotatesubrepoerror
     def outgoing(self, ui, dest, opts):
         return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
 
+    @annotatesubrepoerror
     def incoming(self, ui, source, opts):
         return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
 
+    @annotatesubrepoerror
     def files(self):
         rev = self._state[1]
         ctx = self._repo[rev]
@@ -593,10 +623,12 @@
         ctx = self._repo[None]
         return ctx.walk(match)
 
+    @annotatesubrepoerror
     def forget(self, ui, match, prefix):
         return cmdutil.forget(ui, self._repo, match,
                               os.path.join(prefix, self._path), True)
 
+    @annotatesubrepoerror
     def revert(self, ui, substate, *pats, **opts):
         # reverting a subrepo is a 2 step process:
         # 1. if the no_backup is not set, revert all modified
@@ -751,6 +783,7 @@
                 pass
         return rev
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         # user and date are out of our hands since svn is centralized
         changed, extchanged, missing = self._wcchanged()
@@ -778,6 +811,7 @@
         self._ui.status(self._svncommand(['update', '-r', newrev])[0])
         return newrev
 
+    @annotatesubrepoerror
     def remove(self):
         if self.dirty():
             self._ui.warn(_('not removing repo %s because '
@@ -802,6 +836,7 @@
         except OSError:
             pass
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         if overwrite:
             self._svncommand(['revert', '--recursive'])
@@ -822,6 +857,7 @@
             raise util.Abort((status or err).splitlines()[-1])
         self._ui.status(status)
 
+    @annotatesubrepoerror
     def merge(self, state):
         old = self._state[1]
         new = state[1]
@@ -835,6 +871,7 @@
         # push is a no-op for SVN
         return True
 
+    @annotatesubrepoerror
     def files(self):
         output = self._svncommand(['list', '--recursive', '--xml'])[0]
         doc = xml.dom.minidom.parseString(output)
@@ -1021,6 +1058,7 @@
             raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                                (revision, self._relpath))
 
+    @annotatesubrepoerror
     def dirty(self, ignoreupdate=False):
         if self._gitmissing():
             return self._state[1] != ''
@@ -1037,6 +1075,7 @@
     def basestate(self):
         return self._gitstate()
 
+    @annotatesubrepoerror
     def get(self, state, overwrite=False):
         source, revision, kind = state
         if not revision:
@@ -1120,6 +1159,7 @@
             # a real merge would be required, just checkout the revision
             rawcheckout()
 
+    @annotatesubrepoerror
     def commit(self, text, user, date):
         if self._gitmissing():
             raise util.Abort(_("subrepo %s is missing") % self._relpath)
@@ -1137,6 +1177,7 @@
         # circumstances
         return self._gitstate()
 
+    @annotatesubrepoerror
     def merge(self, state):
         source, revision, kind = state
         self._fetch(source, revision)
@@ -1159,6 +1200,7 @@
         else:
             mergefunc()
 
+    @annotatesubrepoerror
     def push(self, opts):
         force = opts.get('force')
 
@@ -1198,6 +1240,7 @@
                           (self._relpath, self._state[1]))
             return False
 
+    @annotatesubrepoerror
     def remove(self):
         if self._gitmissing():
             return
@@ -1247,6 +1290,7 @@
         ui.progress(_('archiving (%s)') % relpath, None)
 
 
+    @annotatesubrepoerror
     def status(self, rev2, **opts):
         rev1 = self._state[1]
         if self._gitmissing() or not rev1:
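
The hunks above decorate nearly every hg, svn, and git subrepo operation with
@annotatesubrepoerror.  The decorator itself is defined earlier in subrepo.py
and is not shown in these hunks; the sketch below only illustrates the general
shape such a wrapper can take (Abort and the _path attribute are stand-ins):
it catches an abort raised inside the subrepo call and re-raises it with the
subrepo identified, so failures point at the offending repository.

    # Illustrative sketch only; the real decorator may differ in detail.
    import functools

    class Abort(Exception):
        pass

    def annotatesubrepoerror(func):
        @functools.wraps(func)
        def decoratedmethod(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Abort as ex:
                raise Abort('%s (in subrepo %s)' % (ex.args[0], self._path))
        return decoratedmethod

    class fakesubrepo(object):
        _path = 'sub/repo'

        @annotatesubrepoerror
        def push(self, opts):
            raise Abort('push failed')

    try:
        fakesubrepo().push({})
    except Abort as ex:
        print(ex)  # push failed (in subrepo sub/repo)
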
--- a/mercurial/templater.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templater.py	Fri Dec 28 14:13:06 2012 +0100
@@ -8,6 +8,7 @@
 from i18n import _
 import sys, os, re
 import util, config, templatefilters, parser, error
+import types
 
 # template parsing
 
@@ -140,6 +141,10 @@
         v = context._defaults.get(key, '')
     if util.safehasattr(v, '__call__'):
         return v(**mapping)
+    if isinstance(v, types.GeneratorType):
+        v = list(v)
+        mapping[key] = v
+        return v
     return v
 
 def buildfilter(exp, context):
@@ -179,6 +184,7 @@
     for i in d:
         if isinstance(i, dict):
             lm.update(i)
+            lm['originalnode'] = mapping.get('node')
             yield runtemplate(context, lm, ctmpl)
         else:
             # v is not an iterable of dicts, this happen when 'key'
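
Two templater changes interact here: runsymbol now materializes generator
values into lists and caches them back into the mapping, and the map loop
exposes the enclosing changeset's node as 'originalnode' (used by the new
changesetbaseline entry in the paper template map further down).  The caching
matters because a generator can only be walked once; the snippet below is a
stand-alone illustration of that behaviour, not mercurial code.

    # A generator is exhausted after one pass, so a keyword expanded twice
    # would render empty the second time.  Converting it to a list and
    # storing the list back keeps later lookups intact.
    import types

    def parents():
        yield 'cb9a9f314b8b'
        yield 'c78f6c5cbea9'

    mapping = {'parent': parents()}

    v = mapping['parent']
    if isinstance(v, types.GeneratorType):
        v = list(v)
        mapping['parent'] = v

    print(mapping['parent'])  # ['cb9a9f314b8b', 'c78f6c5cbea9']
    print(mapping['parent'])  # same list again, not an exhausted generator
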
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/atom/branchentry.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,8 @@
+ <entry>
+  <title>{branch|escape}</title>
+  <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/>
+  <id>{urlbase}{url}#branch-{node}</id>
+  <updated>{date|rfc3339date}</updated>
+  <published>{date|rfc3339date}</published>
+  <content type="text"><![CDATA[{branch|strip|escape|addbreaks}]]></content>
+ </entry>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/atom/branches.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,11 @@
+{header}
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-tags"/>
+ <link rel="alternate" href="{urlbase}{url}tags"/>
+ <title>{repo|escape}: branches</title>
+ <summary>{repo|escape} branch history</summary>
+ <author><name>Mercurial SCM</name></author>
+ {latestentry%feedupdated}
+
+ {entries%branchentry}
+</feed>
--- a/mercurial/templates/atom/map	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/atom/map	Fri Dec 28 14:13:06 2012 +0100
@@ -10,4 +10,6 @@
 tagentry = tagentry.tmpl
 bookmarks = bookmarks.tmpl
 bookmarkentry = bookmarkentry.tmpl
+branches = branches.tmpl
+branchentry = branchentry.tmpl
 error = error.tmpl
--- a/mercurial/templates/gitweb/branches.tmpl	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/gitweb/branches.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -1,9 +1,9 @@
 {header}
 <title>{repo|escape}: Branches</title>
 <link rel="alternate" type="application/atom+xml"
-   href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
+   href="{url}atom-branches" title="Atom feed for {repo|escape}"/>
 <link rel="alternate" type="application/rss+xml"
-   href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
+   href="{url}rss-branches" title="RSS feed for {repo|escape}"/>
 </head>
 <body>
 
--- a/mercurial/templates/gitweb/map	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/gitweb/map	Fri Dec 28 14:13:06 2012 +0100
@@ -294,7 +294,12 @@
     <td>{contact|obfuscate}</td>
     <td class="age">{lastchange|rfc822date}</td>
     <td class="indexlinks">{archives%indexarchiveentry}</td>
-    <td><div class="rss_logo"><a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a></div></td>
+    <td>{if(isdirectory, '',
+            '<div class="rss_logo">
+                <a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a>
+            </div>'
+            )}
+    </td>
   </tr>\n'
 indexarchiveentry = ' <a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
 index = index.tmpl
--- a/mercurial/templates/monoblue/branches.tmpl	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/monoblue/branches.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -1,7 +1,7 @@
 {header}
     <title>{repo|escape}: Branches</title>
-    <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-    <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+    <link rel="alternate" type="application/atom+xml" href="{url}atom-branches" title="Atom feed for {repo|escape}"/>
+    <link rel="alternate" type="application/rss+xml" href="{url}rss-branches" title="RSS feed for {repo|escape}"/>
 </head>
 
 <body>
--- a/mercurial/templates/monoblue/map	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/monoblue/map	Fri Dec 28 14:13:06 2012 +0100
@@ -247,10 +247,11 @@
     <td class="age">{lastchange|rfc822date}</td>
     <td class="indexlinks">{archives%indexarchiveentry}</td>
     <td>
-      <div class="rss_logo">
-        <a href="{url}rss-log">RSS</a>
-        <a href="{url}atom-log">Atom</a>
-      </div>
+        {if(isdirectory, '',
+            '<div class="rss_logo">
+                <a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a>
+            </div>'
+            )}
     </td>
   </tr>\n'
 indexarchiveentry = '<a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
--- a/mercurial/templates/paper/branches.tmpl	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/paper/branches.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -1,9 +1,9 @@
 {header}
 <title>{repo|escape}: branches</title>
 <link rel="alternate" type="application/atom+xml"
-   href="{url}atom-tags" title="Atom feed for {repo|escape}: branches" />
+   href="{url}atom-branches" title="Atom feed for {repo|escape}: branches" />
 <link rel="alternate" type="application/rss+xml"
-   href="{url}rss-tags" title="RSS feed for {repo|escape}: branches" />
+   href="{url}rss-branches" title="RSS feed for {repo|escape}: branches" />
 </head>
 <body>
 
--- a/mercurial/templates/paper/changeset.tmpl	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/paper/changeset.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -74,6 +74,14 @@
     </div>
   </td>
 </tr>
+<tr>
+ <th class="author">change baseline</th>
+ <td class="author">{parent%changesetbaseline}</td>
+</tr>
+<tr>
+ <th class="author">current baseline</th>
+ <td class="author"><a href="{url}rev/{currentbaseline|short}{sessionvars%urlparameter}">{currentbaseline|short}</a></td>
+</tr>
 </table>
 
 <div class="overflow">
--- a/mercurial/templates/paper/map	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/paper/map	Fri Dec 28 14:13:06 2012 +0100
@@ -101,6 +101,8 @@
 
 changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> '
 
+changesetbaseline = '<a href="{url}rev/{node|short}:{originalnode|short}{sessionvars%urlparameter}">{node|short}</a> '
+
 filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> '
 filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> '
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/rss/branchentry.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,6 @@
+<item>
+    <title>{branch|escape}</title>
+    <link>{urlbase}{url}rev/{node|short}</link>
+    <description><![CDATA[{branch|strip|escape|addbreaks}]]></description>
+    <pubDate>{date|rfc822date}</pubDate>
+</item>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/rss/branches.tmpl	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,6 @@
+{header}
+    <title>{repo|escape}: branches</title>
+    <description>{repo|escape} branch history</description>
+    {entries%branchentry}
+  </channel>
+</rss>
--- a/mercurial/templates/rss/map	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/templates/rss/map	Fri Dec 28 14:13:06 2012 +0100
@@ -9,4 +9,6 @@
 tagentry = tagentry.tmpl
 bookmarks = bookmarks.tmpl
 bookmarkentry = bookmarkentry.tmpl
+branches = branches.tmpl
+branchentry = branchentry.tmpl
 error = error.tmpl
--- a/mercurial/ui.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/ui.py	Fri Dec 28 14:13:06 2012 +0100
@@ -613,7 +613,7 @@
         ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
         If ui is not interactive, the default is returned.
         """
-        resps = [s[s.index('&')+1].lower() for s in choices]
+        resps = [s[s.index('&') + 1].lower() for s in choices]
         while True:
             r = self.prompt(msg, resps[default])
             if r.lower() in resps:
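
The ui.py hunk is only a spacing fix around '+', but the line it touches is
easy to misread: the character following '&' in each choice string becomes the
accepted, case-insensitive response.  A stand-alone rendering of that
comprehension:

    # Deriving prompt responses from choice strings.
    choices = ('&None', 'E&xec', 'Sym&link')
    resps = [s[s.index('&') + 1].lower() for s in choices]
    print(resps)  # ['n', 'x', 'l']
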
--- a/mercurial/util.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/util.py	Fri Dec 28 14:13:06 2012 +0100
@@ -64,7 +64,7 @@
 spawndetached = platform.spawndetached
 split = platform.split
 sshargs = platform.sshargs
-statfiles = platform.statfiles
+statfiles = getattr(osutil, 'statfiles', platform.statfiles)
 termwidth = platform.termwidth
 testpid = platform.testpid
 umask = platform.umask
@@ -244,9 +244,12 @@
         self.name = func.__name__
     def __get__(self, obj, type=None):
         result = self.func(obj)
-        setattr(obj, self.name, result)
+        self.cachevalue(obj, result)
         return result
 
+    def cachevalue(self, obj, value):
+        setattr(obj, self.name, value)
+
 def pipefilter(s, cmd):
     '''filter string S through command CMD, returning its output'''
     p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
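
In util.py, statfiles now prefers an osutil implementation when the C module
provides one, and propertycache routes storage of the computed value through a
new cachevalue() method so subclasses can override where the value is kept
while __get__ still computes it exactly once.  A minimal, self-contained model
of the refactored descriptor (the fakerepo class is made up for illustration):

    # __get__ computes the value and hands it to cachevalue(), which by
    # default stores it as an instance attribute, shadowing the descriptor
    # on later lookups.
    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, type=None):
            result = self.func(obj)
            self.cachevalue(obj, result)
            return result

        def cachevalue(self, obj, value):
            setattr(obj, self.name, value)

    class fakerepo(object):
        @propertycache
        def expensive(self):
            print('computing...')
            return 42

    r = fakerepo()
    print(r.expensive)  # prints 'computing...' then 42
    print(r.expensive)  # 42 again, from the cached instance attribute
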
--- a/mercurial/verify.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/verify.py	Fri Dec 28 14:13:06 2012 +0100
@@ -25,6 +25,7 @@
     return f
 
 def _verify(repo):
+    repo = repo.unfiltered()
     mflinkrevs = {}
     filelinkrevs = {}
     filenodes = {}
--- a/mercurial/windows.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/mercurial/windows.py	Fri Dec 28 14:13:06 2012 +0100
@@ -7,7 +7,7 @@
 
 from i18n import _
 import osutil, encoding
-import errno, msvcrt, os, re, sys, _winreg
+import errno, msvcrt, os, re, stat, sys, _winreg
 
 import win32
 executablepath = win32.executablepath
@@ -213,10 +213,15 @@
             return executable
     return findexisting(os.path.expanduser(os.path.expandvars(command)))
 
+_wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
+
 def statfiles(files):
-    '''Stat each file in files and yield stat or None if file does not exist.
+    '''Stat each file in files. Yield each stat, or None if a file
+    does not exist or has a type we don't care about.
+
     Cluster and cache stat per directory to minimize number of OS stat calls.'''
     dircache = {} # dirname -> filename -> status | None if file does not exist
+    getkind = stat.S_IFMT
     for nf in files:
         nf  = normcase(nf)
         dir, base = os.path.split(nf)
@@ -226,7 +231,8 @@
         if cache is None:
             try:
                 dmap = dict([(normcase(n), s)
-                    for n, k, s in osutil.listdir(dir, True)])
+                             for n, k, s in osutil.listdir(dir, True)
+                             if getkind(s.st_mode) in _wantedkinds])
             except OSError, err:
                 # handle directory not found in Python version prior to 2.5
                 # Python <= 2.4 returns native Windows code 3 in errno
@@ -269,9 +275,13 @@
             break
         head, tail = os.path.split(head)
 
-def unlinkpath(f):
+def unlinkpath(f, ignoremissing=False):
     """unlink and remove the directory if it is empty"""
-    unlink(f)
+    try:
+        unlink(f)
+    except OSError, e:
+        if not (ignoremissing and e.errno == errno.ENOENT):
+            raise
     # try removing directories that might now be empty
     try:
         _removedirs(os.path.dirname(f))
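
Two behavioural changes in windows.py: statfiles now keeps directory-cache
entries only for regular files and symlinks (anything else stats as None, the
same as a missing file), and unlinkpath grows an ignoremissing flag that
swallows ENOENT.  The sketch below shows the type filter with os.lstat
standing in for osutil.listdir; the wantedstat helper is invented for
illustration.

    import os, stat

    _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])

    def wantedstat(path):
        # Return the lstat result only for wanted kinds; otherwise None.
        try:
            st = os.lstat(path)
        except OSError:
            return None
        if stat.S_IFMT(st.st_mode) in _wantedkinds:
            return st
        return None

    print(wantedstat('.'))          # None: a directory is not a wanted kind
    print(wantedstat(os.__file__))  # a stat result: regular file
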
--- a/setup.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/setup.py	Fri Dec 28 14:13:06 2012 +0100
@@ -151,6 +151,8 @@
            if not e.startswith(b('Not trusting file')) \
               and not e.startswith(b('warning: Not importing'))]
     if err:
+        print >> sys.stderr, "stderr from '%s':" % (' '.join(cmd))
+        print >> sys.stderr, '\n'.join(['  ' + e for e in err])
         return ''
     return out
 
--- a/tests/autodiff.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/autodiff.py	Fri Dec 28 14:13:06 2012 +0100
@@ -35,7 +35,7 @@
     for chunk in it:
         ui.write(chunk)
     for fn in sorted(brokenfiles):
-        ui.write('data lost for: %s\n' % fn)
+        ui.write(('data lost for: %s\n' % fn))
 
 cmdtable = {
     "autodiff":
--- a/tests/run-tests.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/run-tests.py	Fri Dec 28 14:13:06 2012 +0100
@@ -55,6 +55,8 @@
 import re
 import threading
 import killdaemons as killmod
+import cPickle as pickle
+import Queue as queue
 
 processlock = threading.Lock()
 
@@ -93,7 +95,8 @@
 if 'java' in sys.platform:
     IMPL_PATH = 'JYTHONPATH'
 
-requiredtools = ["python", "diff", "grep", "unzip", "gunzip", "bunzip2", "sed"]
+requiredtools = [os.path.basename(sys.executable), "diff", "grep", "unzip",
+                 "gunzip", "bunzip2", "sed"]
 
 defaults = {
     'jobs': ('HGTEST_JOBS', 1),
@@ -162,6 +165,8 @@
     parser.add_option("-p", "--port", type="int",
         help="port on which servers should listen"
              " (default: $%s or %d)" % defaults['port'])
+    parser.add_option("--compiler", type="string",
+        help="compiler to build with")
     parser.add_option("--pure", action="store_true",
         help="use pure Python code instead of C extensions")
     parser.add_option("-R", "--restart", action="store_true",
@@ -175,6 +180,8 @@
     parser.add_option("-t", "--timeout", type="int",
         help="kill errant tests after TIMEOUT seconds"
              " (default: $%s or %d)" % defaults['timeout'])
+    parser.add_option("--time", action="store_true",
+        help="time how long each test takes")
     parser.add_option("--tmpdir", type="string",
         help="run tests in the given temporary directory"
              " (implies --keep-tmpdir)")
@@ -263,6 +270,10 @@
             sys.stderr.write(
                 'warning: --timeout option ignored with --debug\n')
         options.timeout = 0
+        if options.time:
+            sys.stderr.write(
+                'warning: --time option ignored with --debug\n')
+        options.time = False
     if options.py3k_warnings:
         if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
             parser.error('--py3k-warnings can only be used on Python 2.6+')
@@ -317,7 +328,7 @@
     # Before we go any further, check for pre-requisite tools
     # stuff from coreutils (cat, rm, etc) are not tested
     for p in requiredtools:
-        if os.name == 'nt':
+        if os.name == 'nt' and not p.endswith('.exe'):
             p += '.exe'
         found = findprogram(p)
         if found:
@@ -352,18 +363,33 @@
             return
     else:
         exename = 'python'
-    vlog('# Making python executable in test path use correct Python')
-    mypython = os.path.join(BINDIR, exename)
-    try:
-        os.symlink(sys.executable, mypython)
-    except AttributeError:
-        # windows fallback
-        shutil.copyfile(sys.executable, mypython)
-        shutil.copymode(sys.executable, mypython)
+        if sys.platform == 'win32':
+            exename = 'python.exe'
+    if getattr(os, 'symlink', None):
+        vlog("# Making python executable in test path a symlink to '%s'" %
+             sys.executable)
+        mypython = os.path.join(BINDIR, exename)
+        try:
+            os.symlink(sys.executable, mypython)
+        except OSError, err:
+            # child processes may race, which is harmless
+            if err.errno != errno.EEXIST:
+                raise
+    else:
+        vlog("# Modifying search path to find %s in '%s'" % (exename, exedir))
+        path = os.environ['PATH'].split(os.pathsep)
+        while exedir in path:
+            path.remove(exedir)
+        os.environ['PATH'] = os.pathsep.join([exedir] + path)
+        if not findprogram(exename):
+            print "WARNING: Cannot find %s in search path" % exename
 
 def installhg(options):
     vlog("# Performing temporary installation of HG")
     installerrs = os.path.join("tests", "install.err")
+    compiler = ''
+    if options.compiler:
+        compiler = '--compiler ' + options.compiler
     pure = options.pure and "--pure" or ""
 
     # Run installer in hg root
@@ -377,12 +403,14 @@
         # least on Windows for now, deal with .pydistutils.cfg bugs
         # when they happen.
         nohome = ''
-    cmd = ('%s setup.py %s clean --all'
-           ' build --build-base="%s"'
-           ' install --force --prefix="%s" --install-lib="%s"'
-           ' --install-scripts="%s" %s >%s 2>&1'
-           % (sys.executable, pure, os.path.join(HGTMP, "build"),
-              INST, PYTHONDIR, BINDIR, nohome, installerrs))
+    cmd = ('%(exe)s setup.py %(pure)s clean --all'
+           ' build %(compiler)s --build-base="%(base)s"'
+           ' install --force --prefix="%(prefix)s" --install-lib="%(libdir)s"'
+           ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
+           % dict(exe=sys.executable, pure=pure, compiler=compiler,
+                  base=os.path.join(HGTMP, "build"),
+                  prefix=INST, libdir=PYTHONDIR, bindir=BINDIR,
+                  nohome=nohome, logfile=installerrs))
     vlog("# Running", cmd)
     if os.system(cmd) == 0:
         if not options.verbose:
@@ -447,6 +475,14 @@
         fn = os.path.join(INST, '..', '.coverage')
         os.environ['COVERAGE_FILE'] = fn
 
+def outputtimes(options):
+    vlog('# Producing time report')
+    times.sort(key=lambda t: (t[1], t[0]), reverse=True)
+    cols = '%7.3f   %s'
+    print '\n%-7s   %s' % ('Time', 'Test')
+    for test, timetaken in times:
+        print cols % (timetaken, test)
+
 def outputcoverage(options):
 
     vlog('# Producing coverage report')
@@ -891,9 +927,16 @@
         replacements.append((re.escape(testtmp), '$TESTTMP'))
 
     os.mkdir(testtmp)
+    if options.time:
+        starttime = time.time()
     ret, out = runner(testpath, testtmp, options, replacements)
+    if options.time:
+        endtime = time.time()
+        times.append((test, endtime - starttime))
     vlog("# Ret was:", ret)
 
+    killdaemons()
+
     mark = '.'
 
     skipped = (ret == SKIPPED_STATUS)
@@ -964,8 +1007,6 @@
         sys.stdout.flush()
         iolock.release()
 
-    killdaemons()
-
     if not options.keep_tmpdir:
         shutil.rmtree(testtmp, True)
     if skipped:
@@ -1003,6 +1044,8 @@
     if INST:
         installhg(options)
         _checkhglib("Testing")
+    else:
+        usecorrectpython()
 
     optcopy = dict(options.__dict__)
     optcopy['jobs'] = 1
@@ -1045,7 +1088,13 @@
                 blacklisted.append(test)
             else:
                 job.append(test)
-    fps = {}
+
+    waitq = queue.Queue()
+
+    # windows lacks os.wait, so we must emulate it
+    def waitfor(proc, rfd):
+        fp = os.fdopen(rfd, 'rb')
+        return lambda: waitq.put((proc.pid, proc.wait(), fp))
 
     for j, job in enumerate(jobs):
         if not job:
@@ -1056,29 +1105,32 @@
         childopts += ['--tmpdir', childtmp]
         cmdline = [PYTHON, sys.argv[0]] + opts + childopts + job
         vlog(' '.join(cmdline))
-        fps[os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)] = os.fdopen(rfd, 'r')
+        proc = subprocess.Popen(cmdline, executable=cmdline[0])
+        threading.Thread(target=waitfor(proc, rfd)).start()
         os.close(wfd)
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     failures = 0
-    tested, skipped, failed = 0, 0, 0
+    passed, skipped, failed = 0, 0, 0
     skips = []
     fails = []
-    while fps:
-        pid, status = os.wait()
-        fp = fps.pop(pid)
-        l = fp.read().splitlines()
+    for job in jobs:
+        if not job:
+            continue
+        pid, status, fp = waitq.get()
         try:
-            test, skip, fail = map(int, l[:3])
-        except ValueError:
-            test, skip, fail = 0, 0, 0
-        split = -fail or len(l)
-        for s in l[3:split]:
-            skips.append(s.split(" ", 1))
-        for s in l[split:]:
-            fails.append(s.split(" ", 1))
-        tested += test
-        skipped += skip
-        failed += fail
+            childresults = pickle.load(fp)
+        except pickle.UnpicklingError:
+            pass
+        else:
+            passed += len(childresults['p'])
+            skipped += len(childresults['s'])
+            failed += len(childresults['f'])
+            skips.extend(childresults['s'])
+            fails.extend(childresults['f'])
+        if options.time:
+            childtimes = pickle.load(fp)
+            times.extend(childtimes)
+
         vlog('pid %d exited, status %d' % (pid, status))
         failures |= status
     print
@@ -1093,17 +1145,20 @@
 
     _checkhglib("Tested")
     print "# Ran %d tests, %d skipped, %d failed." % (
-        tested, skipped, failed)
+        passed + failed, skipped, failed)
 
+    if options.time:
+        outputtimes(options)
     if options.anycoverage:
         outputcoverage(options)
     sys.exit(failures != 0)
 
 results = dict(p=[], f=[], s=[], i=[])
 resultslock = threading.Lock()
+times = []
 iolock = threading.Lock()
 
-def runqueue(options, tests, results):
+def runqueue(options, tests):
     for test in tests:
         ret = runone(options, test)
         if options.first and ret is not None and not ret:
@@ -1118,6 +1173,8 @@
         if INST:
             installhg(options)
             _checkhglib("Testing")
+        else:
+            usecorrectpython()
 
         if options.restart:
             orig = list(tests)
@@ -1129,7 +1186,7 @@
                 print "running all tests"
                 tests = orig
 
-        runqueue(options, tests, results)
+        runqueue(options, tests)
 
         failed = len(results['f'])
         tested = len(results['p']) + failed
@@ -1137,12 +1194,10 @@
         ignored = len(results['i'])
 
         if options.child:
-            fp = os.fdopen(options.child, 'w')
-            fp.write('%d\n%d\n%d\n' % (tested, skipped, failed))
-            for s in results['s']:
-                fp.write("%s %s\n" % s)
-            for s in results['f']:
-                fp.write("%s %s\n" % s)
+            fp = os.fdopen(options.child, 'wb')
+            pickle.dump(results, fp, pickle.HIGHEST_PROTOCOL)
+            if options.time:
+                pickle.dump(times, fp, pickle.HIGHEST_PROTOCOL)
             fp.close()
         else:
             print
@@ -1153,6 +1208,8 @@
             _checkhglib("Tested")
             print "# Ran %d tests, %d skipped, %d failed." % (
                 tested, skipped + ignored, failed)
+            if options.time:
+                outputtimes(options)
 
         if options.anycoverage:
             outputcoverage(options)
@@ -1170,9 +1227,9 @@
 
         checktools()
 
-    if len(args) == 0:
-        args = os.listdir(".")
-    args.sort()
+        if len(args) == 0:
+            args = os.listdir(".")
+        args.sort()
 
     tests = args
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ancestor.py	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,106 @@
+from mercurial import ancestor
+
+# graph is a dict of child->parent adjacency lists for this graph:
+# o  13
+# |
+# | o  12
+# | |
+# | | o    11
+# | | |\
+# | | | | o  10
+# | | | | |
+# | o---+ |  9
+# | | | | |
+# o | | | |  8
+#  / / / /
+# | | o |  7
+# | | | |
+# o---+ |  6
+#  / / /
+# | | o  5
+# | |/
+# | o  4
+# | |
+# o |  3
+# | |
+# | o  2
+# |/
+# o  1
+# |
+# o  0
+
+graph = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [4], 6: [4],
+         7: [4], 8: [-1], 9: [6, 7], 10: [5], 11: [3, 7], 12: [9],
+         13: [8]}
+pfunc = graph.get
+
+class mockchangelog(object):
+    parentrevs = graph.get
+
+def runmissingancestors(revs, bases):
+    print "%% ancestors of %s and not of %s" % (revs, bases)
+    print ancestor.missingancestors(revs, bases, pfunc)
+
+def test_missingancestors():
+    # Empty revs
+    runmissingancestors([], [1])
+    runmissingancestors([], [])
+
+    # If bases is empty, it's the same as if it were [nullrev]
+    runmissingancestors([12], [])
+
+    # Trivial case: revs == bases
+    runmissingancestors([0], [0])
+    runmissingancestors([4, 5, 6], [6, 5, 4])
+
+    # With nullrev
+    runmissingancestors([-1], [12])
+    runmissingancestors([12], [-1])
+
+    # 9 is a parent of 12. 7 is a parent of 9, so an ancestor of 12. 6 is an
+    # ancestor of 12 but not of 7.
+    runmissingancestors([12], [9])
+    runmissingancestors([9], [12])
+    runmissingancestors([12, 9], [7])
+    runmissingancestors([7, 6], [12])
+
+    # More complex cases
+    runmissingancestors([10], [11, 12])
+    runmissingancestors([11], [10])
+    runmissingancestors([11], [10, 12])
+    runmissingancestors([12], [10])
+    runmissingancestors([12], [11])
+    runmissingancestors([10, 11, 12], [13])
+    runmissingancestors([13], [10, 11, 12])
+
+def genlazyancestors(revs, stoprev=0, inclusive=False):
+    print ("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" %
+           (revs, stoprev, inclusive))
+    return ancestor.lazyancestors(mockchangelog, revs, stoprev=stoprev,
+                                  inclusive=inclusive)
+
+def printlazyancestors(s, l):
+    print [n for n in l if n in s]
+
+def test_lazyancestors():
+    # Empty revs
+    s = genlazyancestors([])
+    printlazyancestors(s, [3, 0, -1])
+
+    # Standard example
+    s = genlazyancestors([11, 13])
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+
+    # Including revs
+    s = genlazyancestors([11, 13], inclusive=True)
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+
+    # Test with stoprev
+    s = genlazyancestors([11, 13], stoprev=6)
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+    s = genlazyancestors([11, 13], stoprev=6, inclusive=True)
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+
+if __name__ == '__main__':
+    test_missingancestors()
+    test_lazyancestors()
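
tests/test-ancestor.py drives ancestor.missingancestors and
ancestor.lazyancestors over the hand-drawn DAG above and compares the results
against the .out file that follows.  As a reading aid, here is a brute-force
reference for the missingancestors semantics (ancestors of revs, including the
revs themselves, that are not ancestors of any base); it is not the algorithm
mercurial uses.

    def allancestors(revs, pfunc):
        # Inclusive ancestor set, ignoring the -1 null revision.
        seen, stack = set(), [r for r in revs if r != -1]
        while stack:
            r = stack.pop()
            if r in seen:
                continue
            seen.add(r)
            stack.extend(p for p in pfunc(r) if p != -1)
        return seen

    def naivemissingancestors(revs, bases, pfunc):
        return sorted(allancestors(revs, pfunc) - allancestors(bases, pfunc))

    graph = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [4], 6: [4],
             7: [4], 8: [-1], 9: [6, 7], 10: [5], 11: [3, 7], 12: [9],
             13: [8]}
    print(naivemissingancestors([12, 9], [7], graph.get))  # [6, 9, 12]
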
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ancestor.py.out	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,46 @@
+% ancestors of [] and not of [1]
+[]
+% ancestors of [] and not of []
+[]
+% ancestors of [12] and not of []
+[0, 1, 2, 4, 6, 7, 9, 12]
+% ancestors of [0] and not of [0]
+[]
+% ancestors of [4, 5, 6] and not of [6, 5, 4]
+[]
+% ancestors of [-1] and not of [12]
+[]
+% ancestors of [12] and not of [-1]
+[0, 1, 2, 4, 6, 7, 9, 12]
+% ancestors of [12] and not of [9]
+[12]
+% ancestors of [9] and not of [12]
+[]
+% ancestors of [12, 9] and not of [7]
+[6, 9, 12]
+% ancestors of [7, 6] and not of [12]
+[]
+% ancestors of [10] and not of [11, 12]
+[5, 10]
+% ancestors of [11] and not of [10]
+[3, 7, 11]
+% ancestors of [11] and not of [10, 12]
+[3, 11]
+% ancestors of [12] and not of [10]
+[6, 7, 9, 12]
+% ancestors of [12] and not of [11]
+[6, 9, 12]
+% ancestors of [10, 11, 12] and not of [13]
+[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12]
+% ancestors of [13] and not of [10, 11, 12]
+[8, 13]
+% lazy ancestor set for [], stoprev = 0, inclusive = False
+[]
+% lazy ancestor set for [11, 13], stoprev = 0, inclusive = False
+[7, 8, 3, 4, 1, 0]
+% lazy ancestor set for [11, 13], stoprev = 0, inclusive = True
+[11, 13, 7, 8, 3, 4, 1, 0]
+% lazy ancestor set for [11, 13], stoprev = 6, inclusive = False
+[7, 8]
+% lazy ancestor set for [11, 13], stoprev = 6, inclusive = True
+[11, 13, 7, 8]
--- a/tests/test-bookmarks.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-bookmarks.t	Fri Dec 28 14:13:06 2012 +0100
@@ -40,9 +40,9 @@
   summary:     0
   
 
-second bookmark for rev 0
+second bookmark for rev 0, command should work even with ui.strict on
 
-  $ hg bookmark X2
+  $ hg --config ui.strict=1 bookmark X2
 
 bookmark rev -1 again
 
--- a/tests/test-bundle.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-bundle.t	Fri Dec 28 14:13:06 2012 +0100
@@ -444,6 +444,33 @@
   added 1 changesets with 1 changes to 1 files
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
+View full contents of the bundle
+  $ hg -R test bundle --base null -r 3  ../partial.hg
+  4 changesets found
+  $ cd test
+  $ hg -R ../../partial.hg log -r "bundle()"
+  changeset:   0:f9ee2f85a263
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     0.0
+  
+  changeset:   1:34c2bf6b0626
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     0.1
+  
+  changeset:   2:e38ba6f5b7e0
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     0.2
+  
+  changeset:   3:eebf5a27f8ca
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     0.3
+  
+  $ cd ..
+
 test for 540d1059c802
 
 test for 540d1059c802
--- a/tests/test-check-code-hg.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-check-code-hg.t	Fri Dec 28 14:13:06 2012 +0100
@@ -5,163 +5,7 @@
   >     echo "skipped: not a Mercurial working dir" >&2
   >     exit 80
   > fi
-  $ hg manifest | xargs "$check_code" || echo 'FAILURE IS NOT AN OPTION!!!'
 
-  $ hg manifest | xargs "$check_code" --warnings --nolineno --per-file=0 || true
-  hgext/convert/cvsps.py:0:
-   >                     ui.write('Ancestors: %s\n' % (','.join(r)))
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >                     ui.write('Parent: %d\n' % cs.parents[0].id)
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >                     ui.write('Parents: %s\n' %
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >                 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Author: %s\n' % cs.author)
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Date: %s\n' % util.datestr(cs.date,
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Log:\n')
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Members: \n')
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('PatchSet %d \n' % cs.id)
-   warning: unwrapped ui message
-  hgext/convert/cvsps.py:0:
-   >             ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >         ui.write("parent %s\n" % p)
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >         ui.write('k=%s\nv=%s\n' % (name, value))
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >     ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >     ui.write("branch %s\n\n" % ctx.branch())
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >     ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >     ui.write("revision %d\n" % ctx.rev())
-   warning: unwrapped ui message
-  hgext/hgk.py:0:
-   >     ui.write("tree %s\n" % short(ctx.changeset()[0]))
-   warning: unwrapped ui message
-  hgext/patchbomb.py:0:
-   >             ui.write('Subject: %s\n' % subj)
-   warning: unwrapped ui message
-  hgext/patchbomb.py:0:
-   >         ui.write('From: %s\n' % sender)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >                 ui.note('branch %s\n' % data)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >                 ui.note('node %s\n' % str(data))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >                 ui.note('tag %s\n' % name)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >                 ui.write("unpruned common: %s\n" % " ".join([short(n)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write("local is subset\n")
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write("remote is subset\n")
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write('deltas against other : ' + fmt % pcfmt(numother,
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write('deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >             ui.write('deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >         ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >         ui.write("match: %s\n" % m(d[0]))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >         ui.write('deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >         ui.write('path %s\n' % k)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >         ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write("digraph G {\n")
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write("internal: %s %s\n" % d)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write("standard: %s\n" % util.datestr(d))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('avg chain length  : ' + fmt % avgchainlen)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('compression ratio : ' + fmt % compratio)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('delta size (min/max/avg)             : %d / %d / %d\n'
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('flags  : %s\n' % ', '.join(flags))
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('format : %d\n' % format)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('full revision size (min/max/avg)     : %d / %d / %d\n'
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('revision size : ' + fmt2 % totalsize)
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('revisions     : ' + fmt2 % numrevs)
-   warning: unwrapped ui message
-   warning: unwrapped ui message
-  mercurial/commands.py:0:
-   >     ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
-   warning: unwrapped ui message
-  tests/autodiff.py:0:
-   >         ui.write('data lost for: %s\n' % fn)
-   warning: unwrapped ui message
-  tests/test-ui-color.py:0:
-   > testui.warn('warning\n')
-   warning: unwrapped ui message
-  tests/test-ui-color.py:0:
-   > testui.write('buffered\n')
-   warning: unwrapped ui message
+New errors are not allowed. Warnings are strongly discouraged.
+
+  $ hg manifest | xargs "$check_code" --warnings --nolineno --per-file=0
--- a/tests/test-convert-cvs.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-convert-cvs.t	Fri Dec 28 14:13:06 2012 +0100
@@ -69,9 +69,16 @@
   $TESTTMP/cvsrepo/src/b/c,v  <--  *c (glob)
   $ cd ..
 
-convert fresh repo
+convert fresh repo and also check localtimezone option
+
+NOTE: This doesn't check all time zones -- it merely determines that
+the configuration option is taking effect.
 
-  $ hg convert src src-hg
+An arbitrary (U.S.) time zone is used here.  TZ=US/Hawaii is selected
+since it does not use DST (unlike most other U.S. time zones) and is
+always a fixed offset from UTC.
+
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
   initializing destination src-hg repository
   connecting to $TESTTMP/cvsrepo
   scanning source...
@@ -161,7 +168,7 @@
 
 convert again
 
-  $ hg convert src src-hg
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
   connecting to $TESTTMP/cvsrepo
   scanning source...
   collecting CVS rlog
@@ -221,7 +228,7 @@
 
 convert again
 
-  $ hg convert src src-hg
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True src src-hg
   connecting to $TESTTMP/cvsrepo
   scanning source...
   collecting CVS rlog
@@ -239,7 +246,7 @@
 
 convert again with --filemap
 
-  $ hg convert --filemap filemap src src-filemap
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True --filemap filemap src src-filemap
   connecting to $TESTTMP/cvsrepo
   scanning source...
   collecting CVS rlog
@@ -286,7 +293,7 @@
 
 convert again
 
-  $ hg convert --config convert.cvsps.fuzz=2 src src-hg
+  $ TZ=US/Hawaii hg convert --config convert.cvsps.fuzz=2 --config convert.localtimezone=True src src-hg
   connecting to $TESTTMP/cvsrepo
   scanning source...
   collecting CVS rlog
@@ -300,25 +307,25 @@
   2 funny
   1 fuzzy
   0 fuzzy
-  $ hg -R src-hg glog --template '{rev} ({branches}) {desc} files: {files}\n'
-  o  8 (branch) fuzzy files: b/c
+  $ hg -R src-hg glog --template '{rev} ({branches}) {desc} date: {date|date} files: {files}\n'
+  o  8 (branch) fuzzy date: * -1000 files: b/c (glob)
   |
-  o  7 (branch) fuzzy files: a
+  o  7 (branch) fuzzy date: * -1000 files: a (glob)
   |
   o  6 (branch) funny
   |  ----------------------------
-  |  log message files: a
-  o  5 (branch) ci2 files: b/c
+  |  log message date: * -1000 files: a (glob)
+  o  5 (branch) ci2 date: * -1000 files: b/c (glob)
   
-  o  4 () ci1 files: a b/c
+  o  4 () ci1 date: * -1000 files: a b/c (glob)
   |
-  o  3 () update tags files: .hgtags
+  o  3 () update tags date: * +0000 files: .hgtags (glob)
   |
-  o  2 () ci0 files: b/c
+  o  2 () ci0 date: * -1000 files: b/c (glob)
   |
-  | o  1 (INITIAL) import files:
+  | o  1 (INITIAL) import date: * -1000 files: (glob)
   |/
-  o  0 () Initial revision files: a b/c
+  o  0 () Initial revision date: * -1000 files: a b/c (glob)
   
 
 testing debugcvsps
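
The convert tests above pin TZ=US/Hawaii so that every date produced with
convert.localtimezone=True carries a constant -1000 offset for the globs to
match.  On a POSIX system the fixed offset is easy to confirm (time.tzset does
not exist on Windows, hence the guard):

    import os, time

    os.environ['TZ'] = 'US/Hawaii'
    if hasattr(time, 'tzset'):
        time.tzset()
        print(time.timezone // 3600)                      # 10, i.e. UTC-10:00
        print(time.strftime('%H:%M', time.localtime(0)))  # 14:00 for midnight UTC
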
--- a/tests/test-convert-git.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-convert-git.t	Fri Dec 28 14:13:06 2012 +0100
@@ -298,3 +298,50 @@
   $ hg convert git-repo4 git-repo4-broken-hg 2>&1 | \
   >     grep 'abort:' | sed 's/abort:.*/abort:/g'
   abort:
+
+test sub modules
+
+  $ mkdir git-repo5
+  $ cd git-repo5
+  $ git init-db >/dev/null 2>/dev/null
+  $ echo 'sub' >> foo
+  $ git add foo
+  $ commit -a -m 'addfoo'
+  $ BASE=${PWD}
+  $ cd ..
+  $ mkdir git-repo6
+  $ cd git-repo6
+  $ git init-db >/dev/null 2>/dev/null
+  $ git submodule add ${BASE} >/dev/null 2>/dev/null
+  $ commit -a -m 'addsubmodule' >/dev/null 2>/dev/null
+  $ cd ..
+
+convert sub modules
+  $ hg convert git-repo6 git-repo6-hg
+  initializing destination git-repo6-hg repository
+  scanning source...
+  sorting...
+  converting...
+  0 addsubmodule
+  updating bookmarks
+  $ hg -R git-repo6-hg log -v
+  changeset:   0:* (glob)
+  bookmark:    master
+  tag:         tip
+  user:        nottest <test@example.org>
+  date:        Mon Jan 01 00:00:23 2007 +0000
+  files:       .hgsub .hgsubstate
+  description:
+  addsubmodule
+  
+  committer: test <test@example.org>
+  
+  
+
+  $ cd git-repo6-hg
+  $ hg up >/dev/null 2>/dev/null
+  $ cat .hgsubstate
+  * git-repo5 (glob)
+  $ cd git-repo5
+  $ cat foo
+  sub
--- a/tests/test-convert-svn-source.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-convert-svn-source.t	Fri Dec 28 14:13:06 2012 +0100
@@ -63,9 +63,16 @@
   Committed revision 5.
   $ cd ..
 
-Convert to hg once
+Convert to hg once and also test localtimezone option
+
+NOTE: This doesn't check all time zones -- it merely determines that
+the configuration option is taking effect.
 
-  $ hg convert "$SVNREPOURL/proj%20B" B-hg
+An arbitrary (U.S.) time zone is used here.  TZ=US/Hawaii is selected
+since it does not use DST (unlike most other U.S. time zones) and is
+always a fixed offset from UTC.
+
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True "$SVNREPOURL/proj%20B" B-hg
   initializing destination B-hg repository
   scanning source...
   sorting...
@@ -109,7 +116,7 @@
 
 Test incremental conversion
 
-  $ hg convert "$SVNREPOURL/proj%20B" B-hg
+  $ TZ=US/Hawaii hg convert --config convert.localtimezone=True "$SVNREPOURL/proj%20B" B-hg
   scanning source...
   sorting...
   converting...
@@ -118,22 +125,22 @@
   updating tags
 
   $ cd B-hg
-  $ hg glog --template '{rev} {desc|firstline} files: {files}\n'
-  o  7 update tags files: .hgtags
+  $ hg glog --template '{rev} {desc|firstline} date: {date|date} files: {files}\n'
+  o  7 update tags date: * +0000 files: .hgtags (glob)
   |
-  o  6 work in progress files: letter2.txt
+  o  6 work in progress date: * -1000 files: letter2.txt (glob)
   |
-  o  5 second letter files: letter .txt letter2.txt
+  o  5 second letter date: * -1000 files: letter .txt letter2.txt (glob)
   |
-  o  4 update tags files: .hgtags
+  o  4 update tags date: * +0000 files: .hgtags (glob)
   |
-  o  3 nice day files: letter .txt
+  o  3 nice day date: * -1000 files: letter .txt (glob)
   |
-  o  2 world files: letter .txt
+  o  2 world date: * -1000 files: letter .txt (glob)
   |
-  o  1 hello files: letter .txt
+  o  1 hello date: * -1000 files: letter .txt (glob)
   |
-  o  0 init projB files:
+  o  0 init projB date: * -1000 files: (glob)
   
   $ hg tags -q
   tip
--- a/tests/test-convert.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-convert.t	Fri Dec 28 14:13:06 2012 +0100
@@ -172,6 +172,10 @@
                     will add the most recent revision on the branch indicated in
                     the regex as the second parent of the changeset. Default is
                     "{{mergefrombranch ([-\w]+)}}"
+      convert.localtimezone
+                    use local time (as determined by the TZ environment
+                    variable) for changeset date/times. The default is False
+                    (use UTC).
       hook.cvslog   Specify a Python function to be called at the end of
                     gathering the CVS log. The function is passed a list with
                     the log entries, and can modify the entries in-place, or add
@@ -211,6 +215,10 @@
       convert.svn.trunk
                     specify the name of the trunk branch. The default is
                     "trunk".
+      convert.localtimezone
+                    use local time (as determined by the TZ environment
+                    variable) for changeset date/times. The default is False
+                    (use UTC).
   
       Source history can be retrieved starting at a specific revision, instead
       of being integrally converted. Only single branch conversions are
--- a/tests/test-copy-move-merge.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-copy-move-merge.t	Fri Dec 28 14:13:06 2012 +0100
@@ -25,8 +25,8 @@
      b
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     c -> a *
-     b -> a *
+     src: 'a' -> dst: 'c' *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
--- a/tests/test-debugcomplete.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-debugcomplete.t	Fri Dec 28 14:13:06 2012 +0100
@@ -96,6 +96,7 @@
   debugsetparents
   debugstate
   debugsub
+  debugsuccessorssets
   debugwalk
   debugwireargs
 
@@ -246,6 +247,7 @@
   debugsetparents: 
   debugstate: nodates, datesort
   debugsub: rev
+  debugsuccessorssets: 
   debugwalk: include, exclude
   debugwireargs: three, four, five, ssh, remotecmd, insecure
   graft: rev, continue, edit, log, currentdate, currentuser, date, user, tool, dry-run
--- a/tests/test-double-merge.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-double-merge.t	Fri Dec 28 14:13:06 2012 +0100
@@ -30,7 +30,7 @@
     unmatched files in other:
      bar
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     bar -> foo *
+     src: 'foo' -> dst: 'bar' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
--- a/tests/test-eolfilename.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-eolfilename.t	Fri Dec 28 14:13:06 2012 +0100
@@ -68,9 +68,9 @@
   $ touch "$A"
   $ touch "$B"
   $ hg status --color=always
-  \x1b[0;35;1;4m? foo\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mfoo\x1b[0m (esc)
   \x1b[0;35;1;4mbar\x1b[0m (esc)
-  \x1b[0;35;1;4m? foo\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mfoo\x1b[0m (esc)
   \x1b[0;35;1;4mbar.baz\x1b[0m (esc)
 
   $ cd ..
--- a/tests/test-graft.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-graft.t	Fri Dec 28 14:13:06 2012 +0100
@@ -131,7 +131,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
--- a/tests/test-hgk.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-hgk.t	Fri Dec 28 14:13:06 2012 +0100
@@ -11,7 +11,6 @@
   tree a0c8bcbbb45c
   parent 000000000000
   author test 0 0
-  committer test 0 0
   revision 0
   branch default
   
--- a/tests/test-hgweb-commands.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-hgweb-commands.t	Fri Dec 28 14:13:06 2012 +0100
@@ -441,6 +441,14 @@
       </div>
     </td>
   </tr>
+  <tr>
+   <th class="author">change baseline</th>
+   <td class="author"></td>
+  </tr>
+  <tr>
+   <th class="author">current baseline</th>
+   <td class="author"><a href="/rev/000000000000">000000000000</a></td>
+  </tr>
   </table>
   
   <div class="overflow">
--- a/tests/test-hgweb-diffs.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-hgweb-diffs.t	Fri Dec 28 14:13:06 2012 +0100
@@ -139,6 +139,14 @@
       </div>
     </td>
   </tr>
+  <tr>
+   <th class="author">change baseline</th>
+   <td class="author"></td>
+  </tr>
+  <tr>
+   <th class="author">current baseline</th>
+   <td class="author"><a href="/rev/000000000000">000000000000</a></td>
+  </tr>
   </table>
   
   <div class="overflow">
@@ -400,6 +408,14 @@
       </div>
     </td>
   </tr>
+  <tr>
+   <th class="author">change baseline</th>
+   <td class="author"></td>
+  </tr>
+  <tr>
+   <th class="author">current baseline</th>
+   <td class="author"><a href="/rev/000000000000">000000000000</a></td>
+  </tr>
   </table>
   
   <div class="overflow">
--- a/tests/test-hgweb-removed.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-hgweb-removed.t	Fri Dec 28 14:13:06 2012 +0100
@@ -112,6 +112,14 @@
       </div>
     </td>
   </tr>
+  <tr>
+   <th class="author">change baseline</th>
+   <td class="author"><a href="/rev/cb9a9f314b8b:c78f6c5cbea9">cb9a9f314b8b</a> </td>
+  </tr>
+  <tr>
+   <th class="author">current baseline</th>
+   <td class="author"><a href="/rev/cb9a9f314b8b">cb9a9f314b8b</a></td>
+  </tr>
   </table>
   
   <div class="overflow">
--- a/tests/test-inotify-issue1208.t	Fri Dec 28 14:10:35 2012 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-
-  $ "$TESTDIR/hghave" inotify || exit 80
-  $ echo "[extensions]" >> $HGRCPATH
-  $ echo "inotify=" >> $HGRCPATH
-  $ p="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-  $ hg init $p
-  $ cd $p
-
-fail
-
-  $ ln -sf doesnotexist .hg/inotify.sock
-  $ hg st
-  abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
-  inotify-client: could not start inotify server: child process failed to start
-  $ hg inserve
-  abort: inotify-server: cannot start: .hg/inotify.sock is a broken symlink
-  [255]
-  $ rm .hg/inotify.sock
-
-inserve
-
-  $ hg inserve -d --pid-file=hg.pid
-  $ cat hg.pid >> "$DAEMON_PIDS"
-
-status
-
-  $ hg status
-  ? hg.pid
-
-if we try to start twice the server, make sure we get a correct error
-
-  $ hg inserve -d --pid-file=hg2.pid
-  abort: inotify-server: cannot start: socket is already bound
-  abort: child process failed to start
-  [255]
-  $ kill `cat hg.pid`
-
-  $ cd ..
--- a/tests/test-inotify.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-inotify.t	Fri Dec 28 14:13:06 2012 +0100
@@ -160,3 +160,23 @@
   $ kill `cat hg.pid`
 
   $ cd ..
+
+Ensure that if the repo is in a directory whose name is too long, the
+unix domain socket is reached through a symlink (issue1208).
+
+  $ mkdir 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_
+  $ cd 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_
+  $ mkdir 60_456789_70_456789_80_456789_90_456789_100_56789_
+  $ cd 60_456789_70_456789_80_456789_90_456789_100_56789_
+
+  $ hg --config inotify.pidfile=hg3.pid clone -q ../../repo1
+  $ readlink repo1/.hg/inotify.sock
+  */inotify.sock (glob)
+
+Trying to start the server a second time should fail as usual.
+
+  $ hg --cwd repo1 inserve
+  abort: inotify-server: cannot start: socket is already bound
+  [255]
+
+  $ kill `cat hg3.pid`
--- a/tests/test-issue672.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-issue672.t	Fri Dec 28 14:13:06 2012 +0100
@@ -29,7 +29,7 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     1a -> 1 
+     src: '1' -> dst: '1a' 
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -60,7 +60,7 @@
     unmatched files in local:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     1a -> 1 *
+     src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -83,7 +83,7 @@
     unmatched files in other:
      1a
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     1a -> 1 *
+     src: '1' -> dst: '1a' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
--- a/tests/test-largefiles-cache.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-largefiles-cache.t	Fri Dec 28 14:13:06 2012 +0100
@@ -47,8 +47,7 @@
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   getting changed largefiles
-  large: can't get file locally
-  (no default or default-push path set in hgrc)
+  error getting 7f7097b041ccf68cc5561e9600da4655d21c6d18 from file:$TESTTMP/mirror for large: can't get file locally
   0 largefiles updated, 0 removed
   $ hg status
   ! large
@@ -65,8 +64,7 @@
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   getting changed largefiles
-  large: can't get file locally
-  (no default or default-push path set in hgrc)
+  error getting 7f7097b041ccf68cc5561e9600da4655d21c6d18 from file:$TESTTMP/mirror for large: can't get file locally
   0 largefiles updated, 0 removed
   $ hg status
   ! large
--- a/tests/test-largefiles.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-largefiles.t	Fri Dec 28 14:13:06 2012 +0100
@@ -17,8 +17,8 @@
   > EOF
 
 Create the repo with a couple of revisions of both large and normal
-files, testing that status correctly shows largefiles and that summary output
-is correct.
+files.
+Test status and dirstate of largefiles and that summary output is correct.
 
   $ hg init a
   $ cd a
@@ -35,6 +35,17 @@
   A normal1
   A sub/large2
   A sub/normal2
+  $ touch large1 sub/large2
+  $ sleep 1
+  $ hg st
+  $ hg debugstate --nodates
+  n 644         41 .hglf/large1
+  n 644         41 .hglf/sub/large2
+  n 644          8 normal1
+  n 644          8 sub/normal2
+  $ hg debugstate --large
+  n 644          7 large1
+  n 644          7 sub/large2
   $ echo normal11 > normal1
   $ echo normal22 > sub/normal2
   $ echo large11 > large1
@@ -79,15 +90,25 @@
   C sub/normal2
   $ rm sub/unknown
 
-Test exit codes for remove warning cases (modified and still exiting)
+Test messages and exit codes for remove warning cases
 
   $ hg remove -A large1
-  not removing large1: file still exists (use forget to undo)
+  not removing large1: file still exists
   [1]
   $ echo 'modified' > large1
   $ hg remove large1
-  not removing large1: file is modified (use forget to undo)
+  not removing large1: file is modified (use -f to force removal)
   [1]
+  $ echo 'new' > normalnew
+  $ hg add normalnew
+  $ echo 'new' > largenew
+  $ hg add --large normalnew
+  normalnew already tracked!
+  $ hg remove normalnew largenew
+  not removing largenew: file is untracked
+  not removing normalnew: file has been marked for add (use forget to undo)
+  [1]
+  $ rm normalnew largenew
   $ hg up -Cq
 
 Remove both largefiles and normal files.
@@ -895,24 +916,15 @@
   M sub/normal4
   M sub2/large6
   saved backup bundle to $TESTTMP/d/.hg/strip-backup/f574fb32bb45-backup.hg (glob)
-  large3: can't get file locally
-  (no default or default-push path set in hgrc)
-  sub/large4: can't get file locally
-  (no default or default-push path set in hgrc)
-  large1: can't get file locally
-  (no default or default-push path set in hgrc)
-  sub/large2: can't get file locally
-  (no default or default-push path set in hgrc)
-  sub/large2: can't get file locally
-  (no default or default-push path set in hgrc)
-  large1: can't get file locally
-  (no default or default-push path set in hgrc)
-  sub/large2: can't get file locally
-  (no default or default-push path set in hgrc)
-  large1: can't get file locally
-  (no default or default-push path set in hgrc)
-  sub/large2: can't get file locally
-  (no default or default-push path set in hgrc)
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for large3: can't get file locally
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for sub/large4: can't get file locally
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for large1: can't get file locally
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for sub/large2: can't get file locally
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for sub/large2: can't get file locally
+  error getting 5f78770c0e77ba4287ad6ef3071c9bf9c379742f from file:$TESTTMP/b for large1: can't get file locally
+  error getting eb7338044dc27f9bc59b8dd5a246b065ead7a9c4 from file:$TESTTMP/b for sub/large2: can't get file locally
+  error getting 4669e532d5b2c093a78eca010077e708a071bb64 from file:$TESTTMP/b for large1: can't get file locally
+  error getting 1deebade43c8c498a3c8daddac0244dc55d1331d from file:$TESTTMP/b for sub/large2: can't get file locally
   0 additional largefiles cached
   9 largefiles failed to download
   nothing to rebase
@@ -1457,7 +1469,33 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  $ rm -rf empty
+
+Clone over http, with largefiles being pulled on update, not on clone.
+
+  $ hg clone -q http://localhost:$HGPORT2/ http-clone -U
+
+  $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache
+  resolving manifests
+   overwrite: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
+   .hglf/f1: remote created -> g
+  updating: .hglf/f1 1/1 files (100.00%)
+  getting .hglf/f1
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  getting changed largefiles
+  using http://localhost:$HGPORT2/
+  sending capabilities command
+  getting largefiles: 0/1 lfile (0.00%)
+  getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
+  sending batch command
+  sending getlfile command
+  found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
+  1 largefiles updated, 0 removed
+
+  $ ls http-clone-usercache/*
+  http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
+
+  $ rm -rf empty http-clone http-clone-usercache
 
 used all HGPORTs, kill all daemons
   $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
--- a/tests/test-lfconvert.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-lfconvert.t	Fri Dec 28 14:13:06 2012 +0100
@@ -349,8 +349,7 @@
   $ rm largefiles-repo/.hg/largefiles/*
   $ hg lfconvert --to-normal issue3519 normalized3519
   initializing destination normalized3519
-  large: can't get file locally
-  (no default or default-push path set in hgrc)
+  error getting 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 from file:$TESTTMP/largefiles-repo for large: can't get file locally
   abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad
   [255]
 
--- a/tests/test-mq-qrefresh.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-mq-qrefresh.t	Fri Dec 28 14:13:06 2012 +0100
@@ -209,6 +209,7 @@
   $ hg add orphanchild
   $ hg qrefresh nonexistentfilename # clear patch
   nonexistentfilename: * (glob)
+  $ hg diff -c qtip
   $ hg qrefresh --short 1/base
   $ hg qrefresh --short 2/base
 
--- a/tests/test-mq.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-mq.t	Fri Dec 28 14:13:06 2012 +0100
@@ -198,11 +198,11 @@
 status --mq with color (issue2096)
 
   $ hg status --mq --config extensions.color= --config color.mode=ansi --color=always
-  \x1b[0;32;1mA .hgignore\x1b[0m (esc)
-  \x1b[0;32;1mA A\x1b[0m (esc)
-  \x1b[0;32;1mA B\x1b[0m (esc)
-  \x1b[0;32;1mA series\x1b[0m (esc)
-  \x1b[0;35;1;4m? flaf\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1m.hgignore\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mA\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mB\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mseries\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mflaf\x1b[0m (esc)
 
 try the --mq option on a command provided by an extension
 
@@ -1110,8 +1110,14 @@
   $ hg qpop
   popping baz
   now at: bar
+
+test qdel/qrm
+
   $ hg qdel baz
-
+  $ echo p >> .hg/patches/series
+  $ hg qrm p
+  $ hg qser
+  bar
 
 create a git patch
 
--- a/tests/test-mv-cp-st-diff.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-mv-cp-st-diff.t	Fri Dec 28 14:13:06 2012 +0100
@@ -21,11 +21,15 @@
   $ hg add x/y
   $ hg ci -m2
   $ cd ..
+
   $ show()
   > {
-  >     echo "- $2: $1"
+  >     echo "# $2:"
+  >     echo
+  >     echo "% hg st -C $1"
   >     hg st -C $1
   >     echo
+  >     echo "% hg diff --git $1"
   >     hg diff --git $1
   >     echo
   > }
@@ -35,24 +39,28 @@
 $1 - first commit
 $2 - second commit
 $3 - working dir action
-$4 - test description
 
   $ tb()
   > {
-  >     hg clone t t2 ; cd t2
+  >     hg clone -q t t2 ; cd t2
   >     hg co -q -C 0
   > 
+  >     echo % add a $count
   >     add a $count
   >     count=`expr $count + 1`
+  >     echo % hg ci -m "t0"
   >     hg ci -m "t0"
+  >     echo % $1
   >     $1
+  >     echo % hg ci -m "t1"
   >     hg ci -m "t1"
+  >     echo % $2
   >     $2
+  >     echo % hg ci -m "t2"
   >     hg ci -m "t2"
+  >     echo % $3
   >     $3
-  > 
-  >     echo "** $4 **"
-  >     echo "** $1 / $2 / $3"
+  >     echo
   >     show "" "working to parent"
   >     show "--rev 0" "working to root"
   >     show "--rev 2" "working to branch"
@@ -64,26 +72,39 @@
   >     cd ..
   >     rm -rf t2
   > }
-  $ tb "add a a1" "add a a2" "hg mv a b" "rename in working dir"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+rename in working dir
+
+  $ tb "add a a1" "add a a2" "hg mv a b"
+  % add a 0
+  % hg ci -m t0
   created new head
-  ** rename in working dir **
-  ** add a a1 / add a a2 / hg mv a b
-  - working to parent: 
+  % add a a1
+  % hg ci -m t1
+  % add a a2
+  % hg ci -m t2
+  % hg mv a b
+  
+  # working to parent:
+  
+  % hg st -C 
   A b
     a
   R a
   
+  % hg diff --git 
   diff --git a/a b/b
   rename from a
   rename to b
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   A b
     a
   R a
   
+  % hg diff --git --rev 0
   diff --git a/a b/b
   rename from a
   rename to b
@@ -95,12 +116,15 @@
   +a1
   +a2
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   A b
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/b
   rename from a
   rename to b
@@ -120,9 +144,12 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   M a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -132,9 +159,12 @@
   +a1
   +a2
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   M a
   
+  % hg diff --git --rev . --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -144,10 +174,13 @@
   -a1
   -a2
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   M a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -165,10 +198,13 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   M a
   A x/y
   
+  % hg diff --git --rev . --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -187,25 +223,37 @@
   +y1
   
   
-  $ tb "add a a1" "add a a2" "hg cp a b" "copy in working dir"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+copy in working dir
+
+  $ tb "add a a1" "add a a2" "hg cp a b"
+  % add a 1
+  % hg ci -m t0
   created new head
-  ** copy in working dir **
-  ** add a a1 / add a a2 / hg cp a b
-  - working to parent: 
+  % add a a1
+  % hg ci -m t1
+  % add a a2
+  % hg ci -m t2
+  % hg cp a b
+  
+  # working to parent:
+  
+  % hg st -C 
   A b
     a
   
+  % hg diff --git 
   diff --git a/a b/b
   copy from a
   copy to b
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   M a
   A b
     a
   
+  % hg diff --git --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -225,12 +273,15 @@
   +a1
   +a2
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   M a
   A b
     a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -260,9 +311,12 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   M a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -272,9 +326,12 @@
   +a1
   +a2
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   M a
   
+  % hg diff --git --rev . --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -284,10 +341,13 @@
   -a1
   -a2
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   M a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -305,10 +365,13 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   M a
   A x/y
   
+  % hg diff --git --rev . --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -327,15 +390,24 @@
   +y1
   
   
-  $ tb "hg mv a b" "add b b1" "add b w" "single rename"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+single rename
+
+  $ tb "hg mv a b" "add b b1" "add b w"
+  % add a 2
+  % hg ci -m t0
   created new head
-  ** single rename **
-  ** hg mv a b / add b b1 / add b w
-  - working to parent: 
+  % hg mv a b
+  % hg ci -m t1
+  % add b b1
+  % hg ci -m t2
+  % add b w
+  
+  # working to parent:
+  
+  % hg st -C 
   M b
   
+  % hg diff --git 
   diff --git a/b b/b
   --- a/b
   +++ b/b
@@ -345,11 +417,14 @@
    b1
   +w
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   A b
     a
   R a
   
+  % hg diff --git --rev 0
   diff --git a/a b/b
   rename from a
   rename to b
@@ -361,12 +436,15 @@
   +b1
   +w
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   A b
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/b
   rename from a
   rename to b
@@ -386,11 +464,14 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   A b
     a
   R a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/b
   rename from a
   rename to b
@@ -401,11 +482,14 @@
   +2
   +b1
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   A a
     b
   R b
   
+  % hg diff --git --rev . --rev 0
   diff --git a/b b/a
   rename from b
   rename to a
@@ -416,12 +500,15 @@
   -2
   -b1
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   A b
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/b
   rename from a
   rename to b
@@ -440,12 +527,15 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   A a
     b
   A x/y
   R b
   
+  % hg diff --git --rev . --rev 2
   diff --git a/b b/a
   rename from b
   rename to a
@@ -465,15 +555,24 @@
   +y1
   
   
-  $ tb "hg cp a b" "add b b1" "add a w" "single copy"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+single copy
+
+  $ tb "hg cp a b" "add b b1" "add a w"
+  % add a 3
+  % hg ci -m t0
   created new head
-  ** single copy **
-  ** hg cp a b / add b b1 / add a w
-  - working to parent: 
+  % hg cp a b
+  % hg ci -m t1
+  % add b b1
+  % hg ci -m t2
+  % add a w
+  
+  # working to parent:
+  
+  % hg st -C 
   M a
   
+  % hg diff --git 
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -482,11 +581,14 @@
    3
   +w
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   M a
   A b
     a
   
+  % hg diff --git --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -504,12 +606,15 @@
   +3
   +b1
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   M a
   A b
     a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -537,11 +642,14 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   M a
   A b
     a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -558,11 +666,13 @@
   +3
   +b1
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   M a
-    b
   R b
   
+  % hg diff --git --rev . --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -578,12 +688,15 @@
   -3
   -b1
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   M a
   A b
     a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -610,12 +723,14 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   M a
-    b
   A x/y
   R b
   
+  % hg diff --git --rev . --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -640,26 +755,38 @@
   +y1
   
   
-  $ tb "hg mv a b" "hg mv b c" "hg mv c d" "rename chain"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+rename chain
+
+  $ tb "hg mv a b" "hg mv b c" "hg mv c d"
+  % add a 4
+  % hg ci -m t0
   created new head
-  ** rename chain **
-  ** hg mv a b / hg mv b c / hg mv c d
-  - working to parent: 
+  % hg mv a b
+  % hg ci -m t1
+  % hg mv b c
+  % hg ci -m t2
+  % hg mv c d
+  
+  # working to parent:
+  
+  % hg st -C 
   A d
     c
   R c
   
+  % hg diff --git 
   diff --git a/c b/d
   rename from c
   rename to d
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   A d
     a
   R a
   
+  % hg diff --git --rev 0
   diff --git a/a b/d
   rename from a
   rename to d
@@ -669,12 +796,15 @@
    a
   +4
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   A d
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/d
   rename from a
   rename to d
@@ -692,11 +822,14 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   A c
     a
   R a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/c
   rename from a
   rename to c
@@ -706,11 +839,14 @@
    a
   +4
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   A a
     c
   R c
   
+  % hg diff --git --rev . --rev 0
   diff --git a/c b/a
   rename from c
   rename to a
@@ -720,12 +856,15 @@
    a
   -4
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   A c
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/c
   rename from a
   rename to c
@@ -743,12 +882,15 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   A a
     c
   A x/y
   R c
   
+  % hg diff --git --rev . --rev 2
   diff --git a/c b/a
   rename from c
   rename to a
@@ -767,21 +909,32 @@
   +y1
   
   
-  $ tb "hg cp a b" "hg cp b c" "hg cp c d" "copy chain"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+copy chain
+
+  $ tb "hg cp a b" "hg cp b c" "hg cp c d"
+  % add a 5
+  % hg ci -m t0
   created new head
-  ** copy chain **
-  ** hg cp a b / hg cp b c / hg cp c d
-  - working to parent: 
+  % hg cp a b
+  % hg ci -m t1
+  % hg cp b c
+  % hg ci -m t2
+  % hg cp c d
+  
+  # working to parent:
+  
+  % hg st -C 
   A d
     c
   
+  % hg diff --git 
   diff --git a/c b/d
   copy from c
   copy to d
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   M a
   A b
     a
@@ -790,6 +943,7 @@
   A d
     a
   
+  % hg diff --git --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -821,7 +975,9 @@
    a
   +5
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   M a
   A b
     a
@@ -831,6 +987,7 @@
     a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -876,13 +1033,16 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   M a
   A b
     a
   A c
     a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -906,12 +1066,14 @@
    a
   +5
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   M a
-    b
   R b
   R c
   
+  % hg diff --git --rev . --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -933,7 +1095,9 @@
   -a
   -5
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   M a
   A b
     a
@@ -941,6 +1105,7 @@
     a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -976,13 +1141,15 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   M a
-    b
   A x/y
   R b
   R c
   
+  % hg diff --git --rev . --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1013,24 +1180,36 @@
   +y1
   
   
-  $ tb "add a a1" "hg mv a b" "hg mv b a" "circular rename"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+circular rename
+
+  $ tb "add a a1" "hg mv a b" "hg mv b a"
+  % add a 6
+  % hg ci -m t0
   created new head
-  ** circular rename **
-  ** add a a1 / hg mv a b / hg mv b a
-  - working to parent: 
+  % add a a1
+  % hg ci -m t1
+  % hg mv a b
+  % hg ci -m t2
+  % hg mv b a
+  
+  # working to parent:
+  
+  % hg st -C 
   A a
     b
   R b
   
+  % hg diff --git 
   diff --git a/b b/a
   rename from b
   rename to a
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   M a
   
+  % hg diff --git --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1039,10 +1218,13 @@
   +6
   +a1
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   M a
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1059,11 +1241,14 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   A b
     a
   R a
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/b
   rename from a
   rename to b
@@ -1074,11 +1259,14 @@
   +6
   +a1
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   A a
     b
   R b
   
+  % hg diff --git --rev . --rev 0
   diff --git a/b b/a
   rename from b
   rename to a
@@ -1089,12 +1277,15 @@
   -6
   -a1
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   A b
     a
   R a
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/b
   rename from a
   rename to b
@@ -1113,12 +1304,15 @@
   @@ -1,1 +0,0 @@
   -y1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   A a
     b
   A x/y
   R b
   
+  % hg diff --git --rev . --rev 2
   diff --git a/b b/a
   rename from b
   rename to a
@@ -1138,16 +1332,25 @@
   +y1
   
   
-  $ tb "hg mv x y" "add y/x x1" "add y/x x2" "directory move"
-  updating to branch default
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+directory move
+
+  $ tb "hg mv x y" "add y/x x1" "add y/x x2"
+  % add a 7
+  % hg ci -m t0
   created new head
+  % hg mv x y
   moving x/x to y/x (glob)
-  ** directory move **
-  ** hg mv x y / add y/x x1 / add y/x x2
-  - working to parent: 
+  % hg ci -m t1
+  % add y/x x1
+  % hg ci -m t2
+  % add y/x x2
+  
+  # working to parent:
+  
+  % hg st -C 
   M y/x
   
+  % hg diff --git 
   diff --git a/y/x b/y/x
   --- a/y/x
   +++ b/y/x
@@ -1156,12 +1359,15 @@
    x1
   +x2
   
-  - working to root: --rev 0
+  # working to root:
+  
+  % hg st -C --rev 0
   M a
   A y/x
     x/x
   R x/x
   
+  % hg diff --git --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1178,13 +1384,16 @@
   +x1
   +x2
   
-  - working to branch: --rev 2
+  # working to branch:
+  
+  % hg st -C --rev 2
   M a
   A y/x
     x/x
   R x/x
   R x/y
   
+  % hg diff --git --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1209,12 +1418,15 @@
   +x1
   +x2
   
-  - root to parent: --rev 0 --rev .
+  # root to parent:
+  
+  % hg st -C --rev 0 --rev .
   M a
   A y/x
     x/x
   R x/x
   
+  % hg diff --git --rev 0 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1230,12 +1442,15 @@
    x
   +x1
   
-  - parent to root: --rev . --rev 0
+  # parent to root:
+  
+  % hg st -C --rev . --rev 0
   M a
   A x/x
     y/x
   R y/x
   
+  % hg diff --git --rev . --rev 0
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1251,13 +1466,16 @@
    x
   -x1
   
-  - branch to parent: --rev 2 --rev .
+  # branch to parent:
+  
+  % hg st -C --rev 2 --rev .
   M a
   A y/x
     x/x
   R x/x
   R x/y
   
+  % hg diff --git --rev 2 --rev .
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1281,13 +1499,16 @@
    x
   +x1
   
-  - parent to branch: --rev . --rev 2
+  # parent to branch:
+  
+  % hg st -C --rev . --rev 2
   M a
   A x/x
     y/x
   A x/y
   R y/x
   
+  % hg diff --git --rev . --rev 2
   diff --git a/a b/a
   --- a/a
   +++ b/a
@@ -1318,14 +1539,14 @@
 
   $ hg init unrelated
   $ cd unrelated
-  $ add a a
+  $ echo a >> a
   $ hg ci -Am adda
   adding a
   $ hg mv a b
   $ hg ci -m movea
   $ hg up -C null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ add a a
+  $ echo a >> a
   $ hg ci -Am addunrelateda
   adding a
   created new head
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-obsolete-divergent.t	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,441 @@
+Test file dedicated to testing the divergent troubles from obsolete changesets.
+
+This is the most complex trouble by far, so we isolate it in a dedicated
+file.
+
+Enable obsolete
+
+  $ cat > obs.py << EOF
+  > import mercurial.obsolete
+  > mercurial.obsolete._enabled = True
+  > EOF
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > logtemplate = {rev}:{node|short} {desc}\n
+  > [extensions]
+  > obs=${TESTTMP}/obs.py
+  > [alias]
+  > debugobsolete = debugobsolete -d '0 0'
+  > [phases]
+  > publish=False
+  > EOF
+
+
+  $ mkcommit() {
+  >    echo "$1" > "$1"
+  >    hg add "$1"
+  >    hg ci -m "$1"
+  > }
+  $ getid() {
+  >    hg id --debug -ir "desc('$1')"
+  > }
+
+setup repo
+
+  $ hg init reference
+  $ cd reference
+  $ mkcommit base
+  $ mkcommit A_0
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A_1
+  created new head
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A_2
+  created new head
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cd ..
+
+
+  $ newcase() {
+  >    hg clone -u 0 -q reference $1
+  >    cd $1
+  > }
+
+direct divergence
+-----------------
+
+A_0 has two direct and divergent successors A_1 and A_2
+
+  $ newcase direct
+  $ hg debugobsolete `getid A_0` `getid A_1`
+  $ hg debugobsolete `getid A_0` `getid A_2`
+  $ hg log -G --hidden
+  o  3:392fd25390da A_2
+  |
+  | o  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  @  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      392fd25390da
+      82623d38b9ba
+  82623d38b9ba
+      82623d38b9ba
+  392fd25390da
+      392fd25390da
+  $ hg log -r 'divergent()'
+  2:82623d38b9ba A_1
+  3:392fd25390da A_2
+
+check that mercurial refuses to push
+
+  $ hg init ../other
+  $ hg push ../other
+  pushing to ../other
+  searching for changes
+  abort: push includes divergent changeset: 392fd25390da!
+  [255]
+
+  $ cd ..
+
+
+indirect divergence with known changeset
+-------------------------------------------
+
+  $ newcase indirect_known
+  $ hg debugobsolete `getid A_0` `getid A_1`
+  $ hg debugobsolete `getid A_0` `getid A_2`
+  $ mkcommit A_3
+  created new head
+  $ hg debugobsolete `getid A_2` `getid A_3`
+  $ hg log -G --hidden
+  @  4:01f36c5a8fda A_3
+  |
+  | x  3:392fd25390da A_2
+  |/
+  | o  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  o  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      01f36c5a8fda
+      82623d38b9ba
+  82623d38b9ba
+      82623d38b9ba
+  392fd25390da
+      01f36c5a8fda
+  01f36c5a8fda
+      01f36c5a8fda
+  $ hg log -r 'divergent()'
+  2:82623d38b9ba A_1
+  4:01f36c5a8fda A_3
+  $ cd ..
+
+
+indirect divergence with unknown changeset
+-------------------------------------------
+
+  $ newcase indirect_unknown
+  $ hg debugobsolete `getid A_0` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid A_1`
+  $ hg debugobsolete `getid A_0` `getid A_2`
+  $ hg log -G --hidden
+  o  3:392fd25390da A_2
+  |
+  | o  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  @  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      392fd25390da
+      82623d38b9ba
+  82623d38b9ba
+      82623d38b9ba
+  392fd25390da
+      392fd25390da
+  $ hg log -r 'divergent()'
+  2:82623d38b9ba A_1
+  3:392fd25390da A_2
+  $ cd ..
+
+do not take unknown nodes into account if they are final
+-----------------------------------------------------
+
+  $ newcase final-unknown
+  $ hg debugobsolete `getid A_0` `getid A_1`
+  $ hg debugobsolete `getid A_1` `getid A_2`
+  $ hg debugobsolete `getid A_0` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccccccccccccccccccccccccccccccccc
+  $ hg debugobsolete `getid A_1` dddddddddddddddddddddddddddddddddddddddd
+
+  $ hg debugsuccessorssets 'desc('A_0')'
+  007dc284c1f8
+      392fd25390da
+
+  $ cd ..
+
+divergence that converges again is not divergence anymore
+-----------------------------------------------------
+
+  $ newcase converged_divergence
+  $ hg debugobsolete `getid A_0` `getid A_1`
+  $ hg debugobsolete `getid A_0` `getid A_2`
+  $ mkcommit A_3
+  created new head
+  $ hg debugobsolete `getid A_1` `getid A_3`
+  $ hg debugobsolete `getid A_2` `getid A_3`
+  $ hg log -G --hidden
+  @  4:01f36c5a8fda A_3
+  |
+  | x  3:392fd25390da A_2
+  |/
+  | x  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  o  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      01f36c5a8fda
+  82623d38b9ba
+      01f36c5a8fda
+  392fd25390da
+      01f36c5a8fda
+  01f36c5a8fda
+      01f36c5a8fda
+  $ hg log -r 'divergent()'
+  $ cd ..
+
+split is not divergence
+-----------------------------
+
+  $ newcase split
+  $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
+  $ hg log -G --hidden
+  o  3:392fd25390da A_2
+  |
+  | o  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  @  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      82623d38b9ba 392fd25390da
+  82623d38b9ba
+      82623d38b9ba
+  392fd25390da
+      392fd25390da
+  $ hg log -r 'divergent()'
+
+Even when subsequent rewriting happens
+
+  $ mkcommit A_3
+  created new head
+  $ hg debugobsolete `getid A_1` `getid A_3`
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A_4
+  created new head
+  $ hg debugobsolete `getid A_2` `getid A_4`
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A_5
+  created new head
+  $ hg debugobsolete `getid A_4` `getid A_5`
+  $ hg log -G --hidden
+  @  6:e442cfc57690 A_5
+  |
+  | x  5:6a411f0d7a0a A_4
+  |/
+  | o  4:01f36c5a8fda A_3
+  |/
+  | x  3:392fd25390da A_2
+  |/
+  | x  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  o  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      01f36c5a8fda e442cfc57690
+  82623d38b9ba
+      01f36c5a8fda
+  392fd25390da
+      e442cfc57690
+  01f36c5a8fda
+      01f36c5a8fda
+  6a411f0d7a0a
+      e442cfc57690
+  e442cfc57690
+      e442cfc57690
+  $ hg log -r 'divergent()'
+
+Check a more complex obsolescence graph (with divergence)
+
+  $ mkcommit B_0; hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ hg debugobsolete `getid B_0` `getid A_2`
+  $ mkcommit A_7; hg up 0
+  created new head
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A_8; hg up 0
+  created new head
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg debugobsolete `getid A_5` `getid A_7` `getid A_8`
+  $ mkcommit A_9; hg up 0
+  created new head
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg debugobsolete `getid A_5` `getid A_9`
+  $ hg log -G --hidden
+  o  10:bed64f5d2f5a A_9
+  |
+  | o  9:14608b260df8 A_8
+  |/
+  | o  8:7ae126973a96 A_7
+  |/
+  | x  7:3750ebee865d B_0
+  | |
+  | x  6:e442cfc57690 A_5
+  |/
+  | x  5:6a411f0d7a0a A_4
+  |/
+  | o  4:01f36c5a8fda A_3
+  |/
+  | x  3:392fd25390da A_2
+  |/
+  | x  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  @  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      01f36c5a8fda bed64f5d2f5a
+      01f36c5a8fda 7ae126973a96 14608b260df8
+  82623d38b9ba
+      01f36c5a8fda
+  392fd25390da
+      bed64f5d2f5a
+      7ae126973a96 14608b260df8
+  01f36c5a8fda
+      01f36c5a8fda
+  6a411f0d7a0a
+      bed64f5d2f5a
+      7ae126973a96 14608b260df8
+  e442cfc57690
+      bed64f5d2f5a
+      7ae126973a96 14608b260df8
+  3750ebee865d
+      bed64f5d2f5a
+      7ae126973a96 14608b260df8
+  7ae126973a96
+      7ae126973a96
+  14608b260df8
+      14608b260df8
+  bed64f5d2f5a
+      bed64f5d2f5a
+  $ hg log -r 'divergent()'
+  4:01f36c5a8fda A_3
+  8:7ae126973a96 A_7
+  9:14608b260df8 A_8
+  10:bed64f5d2f5a A_9
+
+fix the divergence
+
+  $ mkcommit A_A; hg up 0
+  created new head
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg debugobsolete `getid A_9` `getid A_A`
+  $ hg debugobsolete `getid A_7` `getid A_A`
+  $ hg debugobsolete `getid A_8` `getid A_A`
+  $ hg log -G --hidden
+  o  11:a139f71be9da A_A
+  |
+  | x  10:bed64f5d2f5a A_9
+  |/
+  | x  9:14608b260df8 A_8
+  |/
+  | x  8:7ae126973a96 A_7
+  |/
+  | x  7:3750ebee865d B_0
+  | |
+  | x  6:e442cfc57690 A_5
+  |/
+  | x  5:6a411f0d7a0a A_4
+  |/
+  | o  4:01f36c5a8fda A_3
+  |/
+  | x  3:392fd25390da A_2
+  |/
+  | x  2:82623d38b9ba A_1
+  |/
+  | x  1:007dc284c1f8 A_0
+  |/
+  @  0:d20a80d4def3 base
+  
+  $ hg debugsuccessorssets 'all()'
+  d20a80d4def3
+      d20a80d4def3
+  007dc284c1f8
+      01f36c5a8fda a139f71be9da
+  82623d38b9ba
+      01f36c5a8fda
+  392fd25390da
+      a139f71be9da
+  01f36c5a8fda
+      01f36c5a8fda
+  6a411f0d7a0a
+      a139f71be9da
+  e442cfc57690
+      a139f71be9da
+  3750ebee865d
+      a139f71be9da
+  7ae126973a96
+      a139f71be9da
+  14608b260df8
+      a139f71be9da
+  bed64f5d2f5a
+      a139f71be9da
+  a139f71be9da
+      a139f71be9da
+  $ hg log -r 'divergent()'
+
+  $ cd ..
+
+
+Subset does not diverge
+------------------------------
+
+Do not report a divergent successors set if it is a subset of another
+successors set (report [A,B], not [A] + [A,B]).
+
+  $ newcase subset
+  $ hg debugobsolete `getid A_0` `getid A_2`
+  $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
+  $ hg debugsuccessorssets 'desc('A_0')'
+  007dc284c1f8
+      82623d38b9ba 392fd25390da
+
+  $ cd ..
--- a/tests/test-obsolete.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-obsolete.t	Fri Dec 28 14:13:06 2012 +0100
@@ -173,6 +173,14 @@
 
 And that we can't push bumped changeset
 
+  $ hg push ../tmpa -r 0 --force #(make repo related)
+  pushing to ../tmpa
+  searching for changes
+  warning: repository is unrelated
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
   $ hg push ../tmpa
   pushing to ../tmpa
   searching for changes
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-pathencode.py	Fri Dec 28 14:13:06 2012 +0100
@@ -0,0 +1,198 @@
+# This is a randomized test that generates different pathnames every
+# time it is invoked, and tests the encoding of those pathnames.
+#
+# It uses a simple probabilistic model to generate valid pathnames
+# that have proven likely to expose bugs and divergent behaviour in
+# different encoding implementations.
+
+from mercurial import parsers
+from mercurial import store
+import binascii, itertools, math, os, random, sys, time
+import collections
+
+if sys.version_info[:2] < (2, 6):
+    sys.exit(0)
+
+validchars = set(map(chr, range(0, 256)))
+alphanum = range(ord('A'), ord('Z'))
+
+for c in '\0/':
+    validchars.remove(c)
+
+winreserved = ('aux con prn nul'.split() +
+               ['com%d' % i for i in xrange(1, 10)] +
+               ['lpt%d' % i for i in xrange(1, 10)])
+
+def casecombinations(names):
+    '''Build all case-diddled combinations of names.'''
+
+    combos = set()
+
+    for r in names:
+        for i in xrange(len(r) + 1):
+            for c in itertools.combinations(xrange(len(r)), i):
+                d = r
+                for j in c:
+                    d = ''.join((d[:j], d[j].upper(), d[j + 1:]))
+                combos.add(d)
+    return sorted(combos)
+
+def buildprobtable(fp, cmd='hg manifest tip'):
+    '''Construct and print a table of probabilities for path name
+    components.  The numbers are percentages.'''
+
+    counts = collections.defaultdict(lambda: 0)
+    for line in os.popen(cmd).read().splitlines():
+        if line[-2:] in ('.i', '.d'):
+            line = line[:-2]
+        if line.startswith('data/'):
+            line = line[5:]
+        for c in line:
+            counts[c] += 1
+    for c in '\r/\n':
+        counts.pop(c, None)
+    t = sum(counts.itervalues()) / 100.0
+    fp.write('probtable = (')
+    for i, (k, v) in enumerate(sorted(counts.iteritems(), key=lambda x: x[1],
+                                      reverse=True)):
+        if (i % 5) == 0:
+            fp.write('\n    ')
+        vt = v / t
+        if vt < 0.0005:
+            break
+        fp.write('(%r, %.03f), ' % (k, vt))
+    fp.write('\n    )\n')
+
+# A table of character frequencies (as percentages), gleaned by
+# looking at filelog names from a real-world, very large repo.
+
+probtable = (
+    ('t', 9.828), ('e', 9.042), ('s', 8.011), ('a', 6.801), ('i', 6.618),
+    ('g', 5.053), ('r', 5.030), ('o', 4.887), ('p', 4.363), ('n', 4.258),
+    ('l', 3.830), ('h', 3.693), ('_', 3.659), ('.', 3.377), ('m', 3.194),
+    ('u', 2.364), ('d', 2.296), ('c', 2.163), ('b', 1.739), ('f', 1.625),
+    ('6', 0.666), ('j', 0.610), ('y', 0.554), ('x', 0.487), ('w', 0.477),
+    ('k', 0.476), ('v', 0.473), ('3', 0.336), ('1', 0.335), ('2', 0.326),
+    ('4', 0.310), ('5', 0.305), ('9', 0.302), ('8', 0.300), ('7', 0.299),
+    ('q', 0.298), ('0', 0.250), ('z', 0.223), ('-', 0.118), ('C', 0.095),
+    ('T', 0.087), ('F', 0.085), ('B', 0.077), ('S', 0.076), ('P', 0.076),
+    ('L', 0.059), ('A', 0.058), ('N', 0.051), ('D', 0.049), ('M', 0.046),
+    ('E', 0.039), ('I', 0.035), ('R', 0.035), ('G', 0.028), ('U', 0.026),
+    ('W', 0.025), ('O', 0.017), ('V', 0.015), ('H', 0.013), ('Q', 0.011),
+    ('J', 0.007), ('K', 0.005), ('+', 0.004), ('X', 0.003), ('Y', 0.001),
+    )
+
+for c, _ in probtable:
+    validchars.remove(c)
+validchars = list(validchars)
+
+def pickfrom(rng, table):
+    c = 0
+    r = rng.random() * sum(i[1] for i in table)
+    for i, p in table:
+        c += p
+        if c >= r:
+            return i
+
+reservedcombos = casecombinations(winreserved)
+
+# The first component of a name following a slash.
+
+firsttable = (
+    (lambda rng: pickfrom(rng, probtable), 90),
+    (lambda rng: rng.choice(validchars), 5),
+    (lambda rng: rng.choice(reservedcombos), 5),
+    )
+
+# Components of a name following the first.
+
+resttable = firsttable[:-1]
+
+# Special suffixes.
+
+internalsuffixcombos = casecombinations('.hg .i .d'.split())
+
+# The last component of a path, before a slash or at the end of a name.
+
+lasttable = resttable + (
+    (lambda rng: '', 95),
+    (lambda rng: rng.choice(internalsuffixcombos), 5),
+    )
+
+def makepart(rng, k):
+    '''Construct a part of a pathname, without slashes.'''
+
+    p = pickfrom(rng, firsttable)(rng)
+    l = len(p)
+    ps = [p]
+    while l <= k:
+        p = pickfrom(rng, resttable)(rng)
+        l += len(p)
+        ps.append(p)
+    ps.append(pickfrom(rng, lasttable)(rng))
+    return ''.join(ps)
+
+def makepath(rng, j, k):
+    '''Construct a complete pathname.'''
+
+    return ('data/' + '/'.join(makepart(rng, k) for _ in xrange(j)) +
+            rng.choice(['.d', '.i']))
+
+def genpath(rng, count):
+    '''Generate random pathnames with gradually increasing lengths.'''
+
+    mink, maxk = 1, 4096
+    def steps():
+        x, k = 0, mink
+        for i in xrange(count):
+            yield mink + int(round(math.sqrt((maxk - mink) * float(i) / count)))
+    for k in steps():
+        x = rng.randint(1, k)
+        y = rng.randint(1, k)
+        yield makepath(rng, x, y)
+
+def runtests(rng, seed, count):
+    nerrs = 0
+    for p in genpath(rng, count):
+        h = store._dothybridencode(p)    # uses C implementation, if available
+        r = store._hybridencode(p, True) # reference implementation in Python
+        if h != r:
+            if nerrs == 0:
+                print >> sys.stderr, 'seed:', hex(seed)[:-1]
+            print >> sys.stderr, "\np: '%s'" % p.encode("string_escape")
+            print >> sys.stderr, "h: '%s'" % h.encode("string_escape")
+            print >> sys.stderr, "r: '%s'" % r.encode("string_escape")
+            nerrs += 1
+    return nerrs
+
+def main():
+    import getopt
+
+    # Empirically observed to take about a second to run
+    count = 100
+    seed = None
+    opts, args = getopt.getopt(sys.argv[1:], 'c:s:',
+                               ['build', 'count=', 'seed='])
+    for o, a in opts:
+        if o in ('-c', '--count'):
+            count = int(a)
+        elif o in ('-s', '--seed'):
+            seed = long(a, base=0) # accepts base 10 or 16 strings
+        elif o == '--build':
+            buildprobtable(sys.stdout,
+                           'find .hg/store/data -type f && '
+                           'cat .hg/store/fncache 2>/dev/null')
+            sys.exit(0)
+
+    if seed is None:
+        try:
+            seed = long(binascii.hexlify(os.urandom(16)), 16)
+        except AttributeError:
+            seed = long(time.time() * 1000)
+
+    rng = random.Random(seed)
+    if runtests(rng, seed, count):
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
--- a/tests/test-rebase-rename.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-rebase-rename.t	Fri Dec 28 14:13:06 2012 +0100
@@ -20,7 +20,10 @@
   $ hg ci -Am B
   adding b
 
-  $ hg up -q -C 0
+  $ hg mv b b-renamed
+  $ hg ci -m 'rename B'
+
+  $ hg up -q -C 1
 
   $ hg mv a a-renamed
 
@@ -28,28 +31,32 @@
   created new head
 
   $ hg tglog
-  @  2: 'rename A'
+  @  3: 'rename A'
   |
-  | o  1: 'B'
+  | o  2: 'rename B'
   |/
+  o  1: 'B'
+  |
   o  0: 'A'
   
 
 Rename is tracked:
 
   $ hg tlog -p --git -r tip
-  2: 'rename A' 
+  3: 'rename A' 
   diff --git a/a b/a-renamed
   rename from a
   rename to a-renamed
   
 Rebase the revision containing the rename:
 
-  $ hg rebase -s 2 -d 1
+  $ hg rebase -s 3 -d 2
   saved backup bundle to $TESTTMP/a/.hg/strip-backup/*-backup.hg (glob)
 
   $ hg tglog
-  @  2: 'rename A'
+  @  3: 'rename A'
+  |
+  o  2: 'rename B'
   |
   o  1: 'B'
   |
@@ -59,11 +66,32 @@
 Rename is not lost:
 
   $ hg tlog -p --git -r tip
-  2: 'rename A' 
+  3: 'rename A' 
   diff --git a/a b/a-renamed
   rename from a
   rename to a-renamed
   
+
+Rebased revision does not contain information about b (issue3739)
+
+  $ hg log -r 3 --debug
+  changeset:   3:3b905b1064f14ace3ad02353b79dd42d32981655
+  tag:         tip
+  phase:       draft
+  parent:      2:920a371a5635af23a26a011ca346cecd1cfcb942
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    3:c4a62b2b64593c8fe0523d4c1ba2e243a8bd4dce
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  files+:      a-renamed
+  files-:      a
+  extra:       branch=default
+  extra:       rebase_source=89af05cb38a281f891c6f5581dd027092da29166
+  description:
+  rename A
+  
+  
+
   $ cd ..
 
 
@@ -78,47 +106,75 @@
   $ hg ci -Am B
   adding b
 
-  $ hg up -q -C 0
+  $ hg cp b b-copied
+  $ hg ci -Am 'copy B'
+
+  $ hg up -q -C 1
 
   $ hg cp a a-copied
   $ hg ci -m 'copy A'
   created new head
 
   $ hg tglog
-  @  2: 'copy A'
+  @  3: 'copy A'
   |
-  | o  1: 'B'
+  | o  2: 'copy B'
   |/
+  o  1: 'B'
+  |
   o  0: 'A'
   
 Copy is tracked:
 
   $ hg tlog -p --git -r tip
-  2: 'copy A' 
+  3: 'copy A' 
   diff --git a/a b/a-copied
   copy from a
   copy to a-copied
   
 Rebase the revision containing the copy:
 
-  $ hg rebase -s 2 -d 1
+  $ hg rebase -s 3 -d 2
   saved backup bundle to $TESTTMP/b/.hg/strip-backup/*-backup.hg (glob)
 
   $ hg tglog
-  @  2: 'copy A'
+  @  3: 'copy A'
+  |
+  o  2: 'copy B'
   |
   o  1: 'B'
   |
   o  0: 'A'
   
+
 Copy is not lost:
 
   $ hg tlog -p --git -r tip
-  2: 'copy A' 
+  3: 'copy A' 
   diff --git a/a b/a-copied
   copy from a
   copy to a-copied
   
+
+Rebased revision does not contain information about b (issue3739)
+
+  $ hg log -r 3 --debug
+  changeset:   3:98f6e6dbf45ab54079c2237fbd11066a5c41a11d
+  tag:         tip
+  phase:       draft
+  parent:      2:39e588434882ff77d01229d169cdc77f29e8855e
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    3:2232f329d66fffe3930d43479ae624f66322b04d
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  files+:      a-copied
+  extra:       branch=default
+  extra:       rebase_source=0a8162ff18a8900df8df8ef7ac0046955205613e
+  description:
+  copy A
+  
+  
+
   $ cd ..
 
 
--- a/tests/test-remove.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-remove.t	Fri Dec 28 14:13:06 2012 +0100
@@ -107,7 +107,7 @@
   $ echo b > bar
   $ hg add bar
   $ remove -A bar
-  not removing bar: file still exists (use -f to force removal)
+  not removing bar: file still exists
   exit code: 1
   A bar
   ./bar
@@ -117,7 +117,7 @@
 21 state clean, options -A
 
   $ remove -A foo
-  not removing foo: file still exists (use -f to force removal)
+  not removing foo: file still exists
   exit code: 1
   ? bar
   ./bar
@@ -128,7 +128,7 @@
 
   $ echo b >> foo
   $ remove -A foo
-  not removing foo: file still exists (use -f to force removal)
+  not removing foo: file still exists
   exit code: 1
   M foo
   ? bar
@@ -220,7 +220,7 @@
 
   $ rm test/bar
   $ remove -A test
-  not removing test/foo: file still exists (use -f to force removal) (glob)
+  not removing test/foo: file still exists (glob)
   removing test/bar (glob)
   exit code: 1
   R test/bar
--- a/tests/test-rename-dir-merge.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-rename-dir-merge.t	Fri Dec 28 14:13:06 2012 +0100
@@ -31,11 +31,11 @@
      b/a
      b/b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b/a -> a/a 
-     b/b -> a/b 
+     src: 'a/a' -> dst: 'b/a' 
+     src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
-    dir a/ -> b/
-    file a/c -> b/c
+     discovered dir src: 'a/' -> dst: 'b/'
+     pending file src: 'a/c' -> dst: 'b/c'
   resolving manifests
    overwrite: False, partial: False
    ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
@@ -82,11 +82,11 @@
     unmatched files in other:
      a/c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b/a -> a/a 
-     b/b -> a/b 
+     src: 'a/a' -> dst: 'b/a' 
+     src: 'a/b' -> dst: 'b/b' 
     checking for directory renames
-    dir a/ -> b/
-    file a/c -> b/c
+     discovered dir src: 'a/' -> dst: 'b/'
+     pending file src: 'a/c' -> dst: 'b/c'
   resolving manifests
    overwrite: False, partial: False
    ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
--- a/tests/test-rename-merge1.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-rename-merge1.t	Fri Dec 28 14:13:06 2012 +0100
@@ -29,9 +29,9 @@
      b
      b2
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     c2 -> a2 !
-     b -> a *
-     b2 -> a2 !
+     src: 'a2' -> dst: 'c2' !
+     src: 'a' -> dst: 'b' *
+     src: 'a2' -> dst: 'b2' !
     checking for directory renames
    a2: divergent renames -> dr
   resolving manifests
@@ -176,7 +176,7 @@
     unmatched files in other:
      newfile
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     newfile -> file %
+     src: 'file' -> dst: 'newfile' %
     checking for directory renames
    file: rename and delete -> rd
   resolving manifests
--- a/tests/test-rename-merge2.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-rename-merge2.t	Fri Dec 28 14:13:06 2012 +0100
@@ -81,7 +81,7 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -116,7 +116,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -154,7 +154,7 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -189,7 +189,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -223,7 +223,7 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a 
+     src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -253,7 +253,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a 
+     src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -280,7 +280,7 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a 
+     src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -312,7 +312,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a 
+     src: 'a' -> dst: 'b' 
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -370,8 +370,8 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     c -> a !
-     b -> a !
+     src: 'a' -> dst: 'c' !
+     src: 'a' -> dst: 'b' !
     checking for directory renames
    a: divergent renames -> dr
   resolving manifests
@@ -649,7 +649,7 @@
     unmatched files in other:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -683,7 +683,7 @@
     unmatched files in local:
      b
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
@@ -721,7 +721,7 @@
     unmatched files in other:
      c
     all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     b -> a *
+     src: 'a' -> dst: 'b' *
     checking for directory renames
   resolving manifests
    overwrite: False, partial: False
--- a/tests/test-revlog-ancestry.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-revlog-ancestry.py	Fri Dec 28 14:13:06 2012 +0100
@@ -62,6 +62,14 @@
     for r in repo.changelog.ancestors([7], 6):
         print r,
 
+    print '\nAncestors of 7, including revs'
+    for r in repo.changelog.ancestors([7], inclusive=True):
+        print r,
+
+    print '\nAncestors of 7, 5 and 3, including revs'
+    for r in repo.changelog.ancestors([7, 5, 3], inclusive=True):
+        print r,
+
     # Descendants
     print '\n\nDescendants of 5'
     for r in repo.changelog.descendants([5]):
--- a/tests/test-revlog-ancestry.py.out	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-revlog-ancestry.py.out	Fri Dec 28 14:13:06 2012 +0100
@@ -6,6 +6,10 @@
 4 2 0 
 Ancestors of 7, stop at 6
 6 
+Ancestors of 7, including revs
+7 6 5 3 4 2 1 0 
+Ancestors of 7, 5 and 3, including revs
+7 5 3 6 4 2 1 0 
 
 Descendants of 5
 7 8 
--- a/tests/test-status-color.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-status-color.t	Fri Dec 28 14:13:06 2012 +0100
@@ -15,100 +15,100 @@
 hg status in repo root:
 
   $ hg status --color=always
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
 
 hg status . in repo root:
 
   $ hg status --color=always .
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
 
   $ hg status --color=always --cwd a
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
   $ hg status --color=always --cwd a .
-  \x1b[0;35;1;4m? 1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
   $ hg status --color=always --cwd a ..
-  \x1b[0;35;1;4m? 1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
   $ hg status --color=always --cwd b
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
   $ hg status --color=always --cwd b .
-  \x1b[0;35;1;4m? 1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? 2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
   $ hg status --color=always --cwd b ..
-  \x1b[0;35;1;4m? ../a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? 1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? 2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
   $ hg status --color=always --cwd a/1
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
   $ hg status --color=always --cwd a/1 .
-  \x1b[0;35;1;4m? in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
   $ hg status --color=always --cwd a/1 ..
-  \x1b[0;35;1;4m? in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc)
 
   $ hg status --color=always --cwd b/1
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
   $ hg status --color=always --cwd b/1 .
-  \x1b[0;35;1;4m? in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
   $ hg status --color=always --cwd b/1 ..
-  \x1b[0;35;1;4m? in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
 
   $ hg status --color=always --cwd b/2
-  \x1b[0;35;1;4m? a/1/in_a_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? a/in_a\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/2/in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? b/in_b\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_root\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
   $ hg status --color=always --cwd b/2 .
-  \x1b[0;35;1;4m? in_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
   $ hg status --color=always --cwd b/2 ..
-  \x1b[0;35;1;4m? ../1/in_b_1\x1b[0m (esc)
-  \x1b[0;35;1;4m? in_b_2\x1b[0m (esc)
-  \x1b[0;35;1;4m? ../in_b\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
   $ cd ..
 
   $ hg init repo2
@@ -128,59 +128,59 @@
 hg status:
 
   $ hg status --color=always
-  \x1b[0;32;1mA added\x1b[0m (esc)
-  \x1b[0;31;1mR removed\x1b[0m (esc)
-  \x1b[0;36;1;4m! deleted\x1b[0m (esc)
-  \x1b[0;35;1;4m? unknown\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
+  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
+  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
 
 hg status modified added removed deleted unknown never-existed ignored:
 
   $ hg status --color=always modified added removed deleted unknown never-existed ignored
   never-existed: * (glob)
-  \x1b[0;32;1mA added\x1b[0m (esc)
-  \x1b[0;31;1mR removed\x1b[0m (esc)
-  \x1b[0;36;1;4m! deleted\x1b[0m (esc)
-  \x1b[0;35;1;4m? unknown\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
+  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
+  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
 
   $ hg copy modified copied
 
 hg status -C:
 
   $ hg status --color=always -C
-  \x1b[0;32;1mA added\x1b[0m (esc)
-  \x1b[0;32;1mA copied\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
-  \x1b[0;31;1mR removed\x1b[0m (esc)
-  \x1b[0;36;1;4m! deleted\x1b[0m (esc)
-  \x1b[0;35;1;4m? unknown\x1b[0m (esc)
+  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
+  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
 
 hg status -A:
 
   $ hg status --color=always -A
-  \x1b[0;32;1mA added\x1b[0m (esc)
-  \x1b[0;32;1mA copied\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
-  \x1b[0;31;1mR removed\x1b[0m (esc)
-  \x1b[0;36;1;4m! deleted\x1b[0m (esc)
-  \x1b[0;35;1;4m? unknown\x1b[0m (esc)
-  \x1b[0;30;1mI ignored\x1b[0m (esc)
-  \x1b[0;0mC .hgignore\x1b[0m (esc)
-  \x1b[0;0mC modified\x1b[0m (esc)
+  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
+  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
+  \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignored\x1b[0m (esc)
+  \x1b[0;0mC \x1b[0m\x1b[0;0m.hgignore\x1b[0m (esc)
+  \x1b[0;0mC \x1b[0m\x1b[0;0mmodified\x1b[0m (esc)
 
 hg status -A (with terminfo color):
 
   $ mkdir "$TESTTMP/terminfo"
   $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
   $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A
-  \x1b[30m\x1b[32m\x1b[1mA added\x1b[30m (esc)
-  \x1b[30m\x1b[32m\x1b[1mA copied\x1b[30m (esc)
+  \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
+  \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
   \x1b[30m\x1b[30m  modified\x1b[30m (esc)
-  \x1b[30m\x1b[31m\x1b[1mR removed\x1b[30m (esc)
-  \x1b[30m\x1b[36m\x1b[1m\x1b[4m! deleted\x1b[30m (esc)
-  \x1b[30m\x1b[35m\x1b[1m\x1b[4m? unknown\x1b[30m (esc)
-  \x1b[30m\x1b[30m\x1b[1mI ignored\x1b[30m (esc)
-  \x1b[30m\x1b[30mC .hgignore\x1b[30m (esc)
-  \x1b[30m\x1b[30mC modified\x1b[30m (esc)
+  \x1b[30m\x1b[31m\x1b[1mR \x1b[30m\x1b[30m\x1b[31m\x1b[1mremoved\x1b[30m (esc)
+  \x1b[30m\x1b[36m\x1b[1m\x1b[4m! \x1b[30m\x1b[30m\x1b[36m\x1b[1m\x1b[4mdeleted\x1b[30m (esc)
+  \x1b[30m\x1b[35m\x1b[1m\x1b[4m? \x1b[30m\x1b[30m\x1b[35m\x1b[1m\x1b[4munknown\x1b[30m (esc)
+  \x1b[30m\x1b[30m\x1b[1mI \x1b[30m\x1b[30m\x1b[30m\x1b[1mignored\x1b[30m (esc)
+  \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30m.hgignore\x1b[30m (esc)
+  \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30mmodified\x1b[30m (esc)
 
 
   $ echo "^ignoreddir$" > .hgignore
@@ -194,7 +194,7 @@
 hg status -i ignoreddir/file:
 
   $ hg status --color=always -i ignoreddir/file
-  \x1b[0;30;1mI ignoreddir/file\x1b[0m (esc)
+  \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc)
   $ cd ..
 
 check 'status -q' and some combinations
@@ -220,11 +220,11 @@
   $ hg --config color.status.modified=periwinkle status --color=always
   ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
   M modified
-  \x1b[0;32;1mA added\x1b[0m (esc)
-  \x1b[0;32;1mA copied\x1b[0m (esc)
-  \x1b[0;31;1mR removed\x1b[0m (esc)
-  \x1b[0;36;1;4m! deleted\x1b[0m (esc)
-  \x1b[0;35;1;4m? unknown\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
+  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
+  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
+  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
+  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
 
 Run status with 2 different flags.
 Check if result is the same or different.
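
The hunks above track a change in how colored status output is rendered: the status character and the file name are now emitted as two separately color-wrapped segments (each closed with \x1b[0m) rather than one, so every expected line carries two escape-bracketed pieces in the same style. The following is an illustrative sketch only, in plain Python with hand-written ANSI codes (not Mercurial's color code), reproducing the two-segment form for the "unknown" style seen above:

CSI = '\x1b['

def colorize(text, effects):
    # wrap text in an ANSI escape built from the given effect codes,
    # then reset with \x1b[0m
    return '%s%sm%s%s0m' % (CSI, ';'.join(effects), text, CSI)

def status_line(char, path, effects):
    # the status character (plus a space) and the path are wrapped
    # separately in the same effects, giving two segments per line
    return colorize(char + ' ', effects) + colorize(path, effects)

print repr(status_line('?', 'in_a', ('0', '35', '1', '4')))
# '\x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m'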
--- a/tests/test-subrepo-git.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-subrepo-git.t	Fri Dec 28 14:13:06 2012 +0100
@@ -331,10 +331,10 @@
   $ hg sum | grep commit
   commit: 1 subrepos
   $ hg push -q
-  abort: subrepo s is missing
+  abort: subrepo s is missing (in subrepo s)
   [255]
   $ hg commit --subrepos -qm missing
-  abort: subrepo s is missing
+  abort: subrepo s is missing (in subrepo s)
   [255]
   $ hg update -C
   cloning subrepo s from $TESTTMP/gitroot
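
This hunk, and the matching ones in the three subrepo test files that follow, reflect the same behaviour change: an abort raised while operating on a subrepository now has the subrepo's path appended to its message, e.g. "(in subrepo s)". A minimal sketch of that kind of annotation, using a hypothetical helper rather than Mercurial's actual subrepo error handling:

class Abort(Exception):
    pass

def run_in_subrepo(subrelpath, func, *args):
    # hypothetical helper: run func and, if it aborts, append the
    # subrepo path so the user can tell where the failure happened
    try:
        return func(*args)
    except Abort, err:
        raise Abort('%s (in subrepo %s)' % (err.args[0], subrelpath))

def failingcommit():
    raise Abort('cannot commit missing svn entries')

try:
    run_in_subrepo('s', failingcommit)
except Abort, err:
    print err.args[0]
# cannot commit missing svn entries (in subrepo s)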
--- a/tests/test-subrepo-recursion.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-subrepo-recursion.t	Fri Dec 28 14:13:06 2012 +0100
@@ -386,7 +386,7 @@
   $ echo f > foo/f
   $ hg archive --subrepos -r tip archive
   cloning subrepo foo from $TESTTMP/empty/foo
-  abort: destination '$TESTTMP/almost-empty/foo' is not empty (glob)
+  abort: destination '$TESTTMP/almost-empty/foo' is not empty (in subrepo foo) (glob)
   [255]
 
 Clone and test outgoing:
--- a/tests/test-subrepo-svn.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-subrepo-svn.t	Fri Dec 28 14:13:06 2012 +0100
@@ -119,7 +119,7 @@
   $ rm s/alpha
   $ hg commit --subrepos -m 'abort on missing file'
   committing subrepository s
-  abort: cannot commit missing svn entries
+  abort: cannot commit missing svn entries (in subrepo s)
   [255]
   $ svn revert s/alpha > /dev/null
 
@@ -180,7 +180,7 @@
   $ echo zzz > s/externals/other
   $ hg ci --subrepos -m 'amend externals from hg'
   committing subrepository s
-  abort: cannot commit svn externals
+  abort: cannot commit svn externals (in subrepo s)
   [255]
   $ hg diff --subrepos -r 1:2 | grep -v diff
   --- a/.hgsubstate	Thu Jan 01 00:00:00 1970 +0000
@@ -202,7 +202,7 @@
   property 'svn:mime-type' set on 's/externals/other' (glob)
   $ hg ci --subrepos -m 'amend externals from hg'
   committing subrepository s
-  abort: cannot commit svn externals
+  abort: cannot commit svn externals (in subrepo s)
   [255]
   $ svn revert -q s/externals/other
 
--- a/tests/test-subrepo.t	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-subrepo.t	Fri Dec 28 14:13:06 2012 +0100
@@ -320,7 +320,7 @@
   no changes found
   pushing subrepo s to $TESTTMP/t/s (glob)
   searching for changes
-  abort: push creates new remote head 12a213df6fa9!
+  abort: push creates new remote head 12a213df6fa9! (in subrepo s)
   (did you forget to merge? use push -f to force)
   [255]
   $ hg push -f
@@ -587,7 +587,7 @@
   created new head
   $ hg -R repo2 ci -m3
   $ hg -q -R repo2 push
-  abort: push creates new remote head cc505f09a8b2!
+  abort: push creates new remote head cc505f09a8b2! (in subrepo s)
   (did you forget to merge? use push -f to force)
   [255]
   $ hg -R repo update
@@ -599,7 +599,7 @@
   $ hg -R repo2 push -f -q
   $ hg -R repo update
   b: untracked file differs
-  abort: untracked files in working directory differ from files in requested revision
+  abort: untracked files in working directory differ from files in requested revision (in subrepo s)
   [255]
 
   $ cat repo/s/b
@@ -645,7 +645,7 @@
   added 2 changesets with 3 changes to 2 files
   (run 'hg update' to get a working copy)
   $ hg -R issue1852b update
-  abort: default path for subrepository sub/repo not found (glob)
+  abort: default path for subrepository not found (in subrepo sub/repo) (glob)
   [255]
 
 Pull -u now doesn't help
@@ -718,6 +718,14 @@
   committing subrepository subrepo-2
   $ hg st subrepo-2/file
 
+Check that share works with subrepo
+  $ hg --config extensions.share= share . ../shared
+  updating working directory
+  cloning subrepo subrepo-2 from $TESTTMP/subrepo-status/subrepo-2
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ test -f ../shared/subrepo-1/.hg/sharedpath
+  [1]
+
 Check hg update --clean
   $ cd $TESTTMP/t
   $ rm -r t/t.orig
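
The new hunk above exercises 'hg share' on a repository that has subrepositories: the subrepo is cloned into the shared working copy rather than shared itself, so the .hg/sharedpath marker that a shared repository carries is absent and the 'test -f' check exits 1. A sketch of that same check in Python, assuming only the presence or absence of the marker file and using the paths from the test above:

import os

def is_shared(repo_root):
    # a repository created with 'hg share' records the source
    # repository's .hg location in .hg/sharedpath; a plain clone
    # has no such file, which is what the test above relies on
    return os.path.isfile(os.path.join(repo_root, '.hg', 'sharedpath'))

print is_shared('../shared')            # True: created by 'hg share'
print is_shared('../shared/subrepo-1')  # False: the subrepo was cloned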
--- a/tests/test-ui-color.py	Fri Dec 28 14:10:35 2012 +0100
+++ b/tests/test-ui-color.py	Fri Dec 28 14:13:06 2012 +0100
@@ -5,8 +5,8 @@
 # ensure errors aren't buffered
 testui = color.colorui()
 testui.pushbuffer()
-testui.write('buffered\n')
-testui.warn('warning\n')
+testui.write(('buffered\n'))
+testui.warn(('warning\n'))
 testui.write_err('error\n')
 print repr(testui.popbuffer())
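
The extra parentheses around the string literals are a runtime no-op, since ('x') is simply 'x'; the test still verifies that write() output is buffered while warnings and errors are not. A small stand-in sketch (not Mercurial's ui implementation) of the behaviour the test expects:

import sys

class bufferingwriter(object):
    # stand-in for the buffering pattern the test exercises:
    # write() output is captured after pushbuffer(), warn() always
    # goes straight to stderr
    def __init__(self):
        self._buffers = []
    def pushbuffer(self):
        self._buffers.append([])
    def popbuffer(self):
        return ''.join(self._buffers.pop())
    def write(self, *args):
        if self._buffers:
            self._buffers[-1].extend(args)
        else:
            sys.stdout.write(''.join(args))
    def warn(self, *args):
        sys.stderr.write(''.join(args))

w = bufferingwriter()
w.pushbuffer()
w.write(('buffered\n'))      # extra parentheses are a no-op: ('x') == 'x'
w.warn(('warning\n'))        # bypasses the buffer, like the errors above
print repr(w.popbuffer())    # 'buffered\n'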