--- a/hgext/fetch.py Sat Jul 21 16:02:09 2007 -0500
+++ b/hgext/fetch.py Sat Jul 21 16:02:10 2007 -0500
@@ -19,7 +19,7 @@
merged, and the result of the merge is committed. Otherwise, the
working directory is updated.'''
- def postincoming(other, modheads):
+ def postincoming(other, modheads, lock, wlock):
if modheads == 0:
return 0
if modheads == 1:
@@ -50,7 +50,7 @@
ui.status(_('new changeset %d:%s merges remote changes '
'with local\n') % (repo.changelog.rev(n),
short(n)))
- def pull():
+ def pull(lock, wlock):
cmdutil.setremoteconfig(ui, opts)
other = hg.repository(ui, ui.expandpath(source))
@@ -61,7 +61,7 @@
elif opts['rev']:
revs = [other.lookup(rev) for rev in opts['rev']]
modheads = repo.pull(other, heads=revs, lock=lock)
- return postincoming(other, modheads)
+ return postincoming(other, modheads, lock, wlock)
parent, p2 = repo.dirstate.parents()
if parent != repo.changelog.tip():
@@ -69,19 +69,19 @@
'(use "hg update" to check out tip)'))
if p2 != nullid:
raise util.Abort(_('outstanding uncommitted merge'))
- wlock = repo.wlock()
- lock = repo.lock()
+ wlock = lock = None
try:
+ wlock = repo.wlock()
+ lock = repo.lock()
mod, add, rem = repo.status(wlock=wlock)[:3]
if mod or add or rem:
raise util.Abort(_('outstanding uncommitted changes'))
if len(repo.heads()) > 1:
raise util.Abort(_('multiple heads in this repository '
'(use "hg heads" and "hg merge" to merge)'))
- return pull()
+ return pull(lock, wlock)
finally:
- lock.release()
- wlock.release()
+ del lock, wlock
cmdtable = {
'fetch':
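
Note: the fetch.py hunks above show the idiom this patch applies everywhere: lock
variables are initialized to None before the try block, acquired inside it, and
dropped in the finally block with del instead of an explicit release() call. That
releases the lock because Mercurial's lock object (mercurial/lock.py) releases
itself in __del__ when its last reference goes away; with CPython's reference
counting that happens immediately. A minimal standalone sketch of the pattern
(FakeLock and repo_operation are illustrative stand-ins, not Mercurial APIs):

    class FakeLock(object):
        """Stand-in for mercurial.lock.lock: release happens in __del__."""
        def __init__(self, name):
            self.name = name
            print("acquired %s" % name)
        def release(self):
            print("released %s" % self.name)
        def __del__(self):
            self.release()

    def repo_operation(fail=False):
        wlock = lock = None          # finally stays safe even if an
        try:                         # acquisition below raises
            wlock = FakeLock("wlock")
            lock = FakeLock("lock")
            if fail:
                raise RuntimeError("boom")
            print("working under both locks")
        finally:
            # dropping the last references runs __del__, which releases;
            # this replaces explicit lock.release()/wlock.release() calls
            del lock, wlock

    repo_operation()
    try:
        repo_operation(fail=True)
    except RuntimeError:
        print("operation failed, but both locks were still released")
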
--- a/hgext/mq.py Sat Jul 21 16:02:09 2007 -0500
+++ b/hgext/mq.py Sat Jul 21 16:02:10 2007 -0500
@@ -439,24 +439,28 @@
def apply(self, repo, series, list=False, update_status=True,
strict=False, patchdir=None, merge=None, wlock=None,
all_files={}):
- if not wlock:
- wlock = repo.wlock()
- lock = repo.lock()
- tr = repo.transaction()
+ lock = tr = None
try:
- ret = self._apply(tr, repo, series, list, update_status,
- strict, patchdir, merge, wlock,
- lock=lock, all_files=all_files)
- tr.close()
- self.save_dirty()
- return ret
- except:
+ if not wlock:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ tr = repo.transaction()
try:
- tr.abort()
- finally:
- repo.invalidate()
- repo.dirstate.invalidate()
- raise
+ ret = self._apply(tr, repo, series, list, update_status,
+ strict, patchdir, merge, wlock,
+ lock=lock, all_files=all_files)
+ tr.close()
+ self.save_dirty()
+ return ret
+ except:
+ try:
+ tr.abort()
+ finally:
+ repo.invalidate()
+ repo.dirstate.invalidate()
+ raise
+ finally:
+ del lock, wlock, tr
def _apply(self, tr, repo, series, list=False, update_status=True,
strict=False, patchdir=None, merge=None, wlock=None,
@@ -616,44 +620,51 @@
commitfiles = m + a + r
self.check_toppatch(repo)
wlock = repo.wlock()
- insert = self.full_series_end()
- if msg:
- n = repo.commit(commitfiles, msg, force=True, wlock=wlock)
- else:
- n = repo.commit(commitfiles,
- "[mq]: %s" % patch, force=True, wlock=wlock)
- if n == None:
- raise util.Abort(_("repo commit failed"))
- self.full_series[insert:insert] = [patch]
- self.applied.append(statusentry(revlog.hex(n), patch))
- self.parse_series()
- self.series_dirty = 1
- self.applied_dirty = 1
- p = self.opener(patch, "w")
- if msg:
- msg = msg + "\n"
- p.write(msg)
- p.close()
- wlock = None
- r = self.qrepo()
- if r: r.add([patch])
- if commitfiles:
- self.refresh(repo, short=True)
- self.removeundo(repo)
+ try:
+ insert = self.full_series_end()
+ if msg:
+ n = repo.commit(commitfiles, msg, force=True, wlock=wlock)
+ else:
+ n = repo.commit(commitfiles,
+ "[mq]: %s" % patch, force=True, wlock=wlock)
+ if n == None:
+ raise util.Abort(_("repo commit failed"))
+ self.full_series[insert:insert] = [patch]
+ self.applied.append(statusentry(revlog.hex(n), patch))
+ self.parse_series()
+ self.series_dirty = 1
+ self.applied_dirty = 1
+ p = self.opener(patch, "w")
+ if msg:
+ msg = msg + "\n"
+ p.write(msg)
+ p.close()
+ wlock = None
+ r = self.qrepo()
+ if r: r.add([patch])
+ if commitfiles:
+ self.refresh(repo, short=True)
+ self.removeundo(repo)
+ finally:
+ del wlock
def strip(self, repo, rev, update=True, backup="all", wlock=None):
- if not wlock:
- wlock = repo.wlock()
- lock = repo.lock()
+ lock = None
+ try:
+ if not wlock:
+ wlock = repo.wlock()
+ lock = repo.lock()
- if update:
- self.check_localchanges(repo, refresh=False)
- urev = self.qparents(repo, rev)
- hg.clean(repo, urev, wlock=wlock)
- repo.dirstate.write()
+ if update:
+ self.check_localchanges(repo, refresh=False)
+ urev = self.qparents(repo, rev)
+ hg.clean(repo, urev, wlock=wlock)
+ repo.dirstate.write()
- self.removeundo(repo)
- repair.strip(self.ui, repo, rev, backup)
+ self.removeundo(repo)
+ repair.strip(self.ui, repo, rev, backup)
+ finally:
+ del lock, wlock
def isapplied(self, patch):
"""returns (index, rev, patch)"""
@@ -740,69 +751,74 @@
mergeq=None, wlock=None):
if not wlock:
wlock = repo.wlock()
- patch = self.lookup(patch)
- # Suppose our series file is: A B C and the current 'top' patch is B.
- # qpush C should be performed (moving forward)
- # qpush B is a NOP (no change)
- # qpush A is an error (can't go backwards with qpush)
- if patch:
- info = self.isapplied(patch)
- if info:
- if info[0] < len(self.applied) - 1:
- raise util.Abort(_("cannot push to a previous patch: %s") %
- patch)
- if info[0] < len(self.series) - 1:
- self.ui.warn(_('qpush: %s is already at the top\n') % patch)
- else:
- self.ui.warn(_('all patches are currently applied\n'))
- return
+ try:
+ patch = self.lookup(patch)
+ # Suppose our series file is: A B C and the current 'top'
+            # patch is B.
+            # qpush C should be performed (moving forward)
+            # qpush B is a NOP (no change)
+            # qpush A is an error (can't go backwards with qpush)
+ if patch:
+ info = self.isapplied(patch)
+ if info:
+ if info[0] < len(self.applied) - 1:
+ raise util.Abort(
+ _("cannot push to a previous patch: %s") % patch)
+ if info[0] < len(self.series) - 1:
+ self.ui.warn(
+ _('qpush: %s is already at the top\n') % patch)
+ else:
+ self.ui.warn(_('all patches are currently applied\n'))
+ return
- # Following the above example, starting at 'top' of B:
- # qpush should be performed (pushes C), but a subsequent qpush without
- # an argument is an error (nothing to apply). This allows a loop
- # of "...while hg qpush..." to work as it detects an error when done
- if self.series_end() == len(self.series):
- self.ui.warn(_('patch series already fully applied\n'))
- return 1
- if not force:
- self.check_localchanges(repo)
+ # Following the above example, starting at 'top' of B:
+ # qpush should be performed (pushes C), but a subsequent
+ # qpush without an argument is an error (nothing to
+ # apply). This allows a loop of "...while hg qpush..." to
+ # work as it detects an error when done
+ if self.series_end() == len(self.series):
+ self.ui.warn(_('patch series already fully applied\n'))
+ return 1
+ if not force:
+ self.check_localchanges(repo)
- self.applied_dirty = 1;
- start = self.series_end()
- if start > 0:
- self.check_toppatch(repo)
- if not patch:
- patch = self.series[start]
- end = start + 1
- else:
- end = self.series.index(patch, start) + 1
- s = self.series[start:end]
- all_files = {}
- try:
- if mergeq:
- ret = self.mergepatch(repo, mergeq, s, wlock)
+ self.applied_dirty = 1;
+ start = self.series_end()
+ if start > 0:
+ self.check_toppatch(repo)
+ if not patch:
+ patch = self.series[start]
+ end = start + 1
else:
- ret = self.apply(repo, s, list, wlock=wlock,
- all_files=all_files)
- except:
- self.ui.warn(_('cleaning up working directory...'))
- node = repo.dirstate.parents()[0]
- hg.revert(repo, node, None, wlock)
- unknown = repo.status(wlock=wlock)[4]
- # only remove unknown files that we know we touched or
- # created while patching
- for f in unknown:
- if f in all_files:
- util.unlink(repo.wjoin(f))
- self.ui.warn(_('done\n'))
- raise
- top = self.applied[-1].name
- if ret[0]:
- self.ui.write("Errors during apply, please fix and refresh %s\n" %
- top)
- else:
- self.ui.write("Now at: %s\n" % top)
- return ret[0]
+ end = self.series.index(patch, start) + 1
+ s = self.series[start:end]
+ all_files = {}
+ try:
+ if mergeq:
+ ret = self.mergepatch(repo, mergeq, s, wlock)
+ else:
+ ret = self.apply(repo, s, list, wlock=wlock,
+ all_files=all_files)
+ except:
+ self.ui.warn(_('cleaning up working directory...'))
+ node = repo.dirstate.parents()[0]
+ hg.revert(repo, node, None, wlock)
+ unknown = repo.status(wlock=wlock)[4]
+ # only remove unknown files that we know we touched or
+ # created while patching
+ for f in unknown:
+ if f in all_files:
+ util.unlink(repo.wjoin(f))
+ self.ui.warn(_('done\n'))
+ raise
+ top = self.applied[-1].name
+ if ret[0]:
+ self.ui.write(
+ "Errors during apply, please fix and refresh %s\n" % top)
+ else:
+ self.ui.write("Now at: %s\n" % top)
+ return ret[0]
+ finally:
+ del wlock
def pop(self, repo, patch=None, force=False, update=True, all=False,
wlock=None):
@@ -812,82 +828,85 @@
if not wlock:
wlock = repo.wlock()
- if patch:
- # index, rev, patch
- info = self.isapplied(patch)
- if not info:
- patch = self.lookup(patch)
- info = self.isapplied(patch)
- if not info:
- raise util.Abort(_("patch %s is not applied") % patch)
+ try:
+ if patch:
+ # index, rev, patch
+ info = self.isapplied(patch)
+ if not info:
+ patch = self.lookup(patch)
+ info = self.isapplied(patch)
+ if not info:
+ raise util.Abort(_("patch %s is not applied") % patch)
- if len(self.applied) == 0:
- # Allow qpop -a to work repeatedly,
- # but not qpop without an argument
- self.ui.warn(_("no patches applied\n"))
- return not all
+ if len(self.applied) == 0:
+ # Allow qpop -a to work repeatedly,
+ # but not qpop without an argument
+ self.ui.warn(_("no patches applied\n"))
+ return not all
- if not update:
- parents = repo.dirstate.parents()
- rr = [ revlog.bin(x.rev) for x in self.applied ]
- for p in parents:
- if p in rr:
- self.ui.warn("qpop: forcing dirstate update\n")
- update = True
+ if not update:
+ parents = repo.dirstate.parents()
+ rr = [ revlog.bin(x.rev) for x in self.applied ]
+ for p in parents:
+ if p in rr:
+ self.ui.warn("qpop: forcing dirstate update\n")
+ update = True
- if not force and update:
- self.check_localchanges(repo)
+ if not force and update:
+ self.check_localchanges(repo)
- self.applied_dirty = 1;
- end = len(self.applied)
- if not patch:
- if all:
- popi = 0
+ self.applied_dirty = 1;
+ end = len(self.applied)
+ if not patch:
+ if all:
+ popi = 0
+ else:
+ popi = len(self.applied) - 1
else:
- popi = len(self.applied) - 1
- else:
- popi = info[0] + 1
- if popi >= end:
- self.ui.warn("qpop: %s is already at the top\n" % patch)
- return
- info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
+ popi = info[0] + 1
+ if popi >= end:
+ self.ui.warn("qpop: %s is already at the top\n" % patch)
+ return
+ info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
- start = info[0]
- rev = revlog.bin(info[1])
+ start = info[0]
+ rev = revlog.bin(info[1])
- # we know there are no local changes, so we can make a simplified
- # form of hg.update.
- if update:
- top = self.check_toppatch(repo)
- qp = self.qparents(repo, rev)
- changes = repo.changelog.read(qp)
- mmap = repo.manifest.read(changes[0])
- m, a, r, d, u = repo.status(qp, top)[:5]
- if d:
- raise util.Abort("deletions found between repo revs")
- for f in m:
- getfile(f, mmap[f])
- for f in r:
- getfile(f, mmap[f])
- util.set_exec(repo.wjoin(f), mmap.execf(f))
- for f in m + r:
- repo.dirstate.normal(f)
- for f in a:
- try:
- os.unlink(repo.wjoin(f))
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- try: os.removedirs(os.path.dirname(repo.wjoin(f)))
- except: pass
- repo.dirstate.forget(f)
- repo.dirstate.setparents(qp, revlog.nullid)
- self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
- del self.applied[start:end]
- if len(self.applied):
- self.ui.write("Now at: %s\n" % self.applied[-1].name)
- else:
- self.ui.write("Patch queue now empty\n")
+ # we know there are no local changes, so we can make a simplified
+ # form of hg.update.
+ if update:
+ top = self.check_toppatch(repo)
+ qp = self.qparents(repo, rev)
+ changes = repo.changelog.read(qp)
+ mmap = repo.manifest.read(changes[0])
+ m, a, r, d, u = repo.status(qp, top)[:5]
+ if d:
+ raise util.Abort("deletions found between repo revs")
+ for f in m:
+ getfile(f, mmap[f])
+ for f in r:
+ getfile(f, mmap[f])
+ util.set_exec(repo.wjoin(f), mmap.execf(f))
+ for f in m + r:
+ repo.dirstate.normal(f)
+ for f in a:
+ try:
+ os.unlink(repo.wjoin(f))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try: os.removedirs(os.path.dirname(repo.wjoin(f)))
+ except: pass
+ repo.dirstate.forget(f)
+ repo.dirstate.setparents(qp, revlog.nullid)
+ self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
+ del self.applied[start:end]
+ if len(self.applied):
+ self.ui.write("Now at: %s\n" % self.applied[-1].name)
+ else:
+ self.ui.write("Patch queue now empty\n")
+ finally:
+ del wlock
def diff(self, repo, pats, opts):
top = self.check_toppatch(repo)
@@ -904,179 +923,184 @@
self.ui.write("No patches applied\n")
return 1
wlock = repo.wlock()
- self.check_toppatch(repo)
- (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
- top = revlog.bin(top)
- cparents = repo.changelog.parents(top)
- patchparent = self.qparents(repo, top)
- message, comments, user, date, patchfound = self.readheaders(patchfn)
-
- patchf = self.opener(patchfn, 'r+')
-
- # if the patch was a git patch, refresh it as a git patch
- for line in patchf:
- if line.startswith('diff --git'):
- self.diffopts().git = True
- break
- patchf.seek(0)
- patchf.truncate()
+ try:
+ self.check_toppatch(repo)
+ (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
+ top = revlog.bin(top)
+ cparents = repo.changelog.parents(top)
+ patchparent = self.qparents(repo, top)
+ message, comments, user, date, patchfound = self.readheaders(patchfn)
- msg = opts.get('msg', '').rstrip()
- if msg:
- if comments:
- # Remove existing message.
- ci = 0
- subj = None
- for mi in xrange(len(message)):
- if comments[ci].lower().startswith('subject: '):
- subj = comments[ci][9:]
- while message[mi] != comments[ci] and message[mi] != subj:
- ci += 1
- del comments[ci]
- comments.append(msg)
- if comments:
- comments = "\n".join(comments) + '\n\n'
- patchf.write(comments)
+ patchf = self.opener(patchfn, 'r+')
+
+ # if the patch was a git patch, refresh it as a git patch
+ for line in patchf:
+ if line.startswith('diff --git'):
+ self.diffopts().git = True
+ break
+ patchf.seek(0)
+ patchf.truncate()
- if opts.get('git'):
- self.diffopts().git = True
- fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
- tip = repo.changelog.tip()
- if top == tip:
- # if the top of our patch queue is also the tip, there is an
- # optimization here. We update the dirstate in place and strip
- # off the tip commit. Then just commit the current directory
- # tree. We can also send repo.commit the list of files
- # changed to speed up the diff
- #
- # in short mode, we only diff the files included in the
- # patch already
- #
- # this should really read:
- # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
- # but we do it backwards to take advantage of manifest/chlog
- # caching against the next repo.status call
- #
- mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
- changes = repo.changelog.read(tip)
- man = repo.manifest.read(changes[0])
- aaa = aa[:]
- if opts.get('short'):
- filelist = mm + aa + dd
- match = dict.fromkeys(filelist).__contains__
- else:
- filelist = None
- match = util.always
- m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
+ msg = opts.get('msg', '').rstrip()
+ if msg:
+ if comments:
+ # Remove existing message.
+ ci = 0
+ subj = None
+ for mi in xrange(len(message)):
+ if comments[ci].lower().startswith('subject: '):
+ subj = comments[ci][9:]
+ while message[mi] != comments[ci] and message[mi] != subj:
+ ci += 1
+ del comments[ci]
+ comments.append(msg)
+ if comments:
+ comments = "\n".join(comments) + '\n\n'
+ patchf.write(comments)
- # we might end up with files that were added between tip and
- # the dirstate parent, but then changed in the local dirstate.
- # in this case, we want them to only show up in the added section
- for x in m:
- if x not in aa:
- mm.append(x)
- # we might end up with files added by the local dirstate that
- # were deleted by the patch. In this case, they should only
- # show up in the changed section.
- for x in a:
- if x in dd:
- del dd[dd.index(x)]
- mm.append(x)
+ if opts.get('git'):
+ self.diffopts().git = True
+ fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
+ tip = repo.changelog.tip()
+ if top == tip:
+ # if the top of our patch queue is also the tip, there is an
+ # optimization here. We update the dirstate in place and strip
+ # off the tip commit. Then just commit the current directory
+ # tree. We can also send repo.commit the list of files
+ # changed to speed up the diff
+ #
+ # in short mode, we only diff the files included in the
+ # patch already
+ #
+ # this should really read:
+ # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
+ # but we do it backwards to take advantage of manifest/chlog
+ # caching against the next repo.status call
+ #
+ mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
+ changes = repo.changelog.read(tip)
+ man = repo.manifest.read(changes[0])
+ aaa = aa[:]
+ if opts.get('short'):
+ filelist = mm + aa + dd
+ match = dict.fromkeys(filelist).__contains__
else:
- aa.append(x)
- # make sure any files deleted in the local dirstate
- # are not in the add or change column of the patch
- forget = []
- for x in d + r:
- if x in aa:
- del aa[aa.index(x)]
- forget.append(x)
- continue
- elif x in mm:
- del mm[mm.index(x)]
- dd.append(x)
+ filelist = None
+ match = util.always
+ m, a, r, d, u = repo.status(files=filelist, match=match)[:5]
- m = util.unique(mm)
- r = util.unique(dd)
- a = util.unique(aa)
- c = [filter(matchfn, l) for l in (m, a, r, [], u)]
- filelist = util.unique(c[0] + c[1] + c[2])
- patch.diff(repo, patchparent, files=filelist, match=matchfn,
- fp=patchf, changes=c, opts=self.diffopts())
- patchf.close()
+ # we might end up with files that were added between
+ # tip and the dirstate parent, but then changed in the
+ # local dirstate. in this case, we want them to only
+ # show up in the added section
+ for x in m:
+ if x not in aa:
+ mm.append(x)
+ # we might end up with files added by the local dirstate that
+ # were deleted by the patch. In this case, they should only
+ # show up in the changed section.
+ for x in a:
+ if x in dd:
+ del dd[dd.index(x)]
+ mm.append(x)
+ else:
+ aa.append(x)
+ # make sure any files deleted in the local dirstate
+ # are not in the add or change column of the patch
+ forget = []
+ for x in d + r:
+ if x in aa:
+ del aa[aa.index(x)]
+ forget.append(x)
+ continue
+ elif x in mm:
+ del mm[mm.index(x)]
+ dd.append(x)
+
+ m = util.unique(mm)
+ r = util.unique(dd)
+ a = util.unique(aa)
+ c = [filter(matchfn, l) for l in (m, a, r, [], u)]
+ filelist = util.unique(c[0] + c[1] + c[2])
+ patch.diff(repo, patchparent, files=filelist, match=matchfn,
+ fp=patchf, changes=c, opts=self.diffopts())
+ patchf.close()
- repo.dirstate.setparents(*cparents)
- copies = {}
- for dst in a:
- src = repo.dirstate.copied(dst)
- if src is None:
- continue
- copies.setdefault(src, []).append(dst)
- repo.dirstate.add(dst)
- # remember the copies between patchparent and tip
- # this may be slow, so don't do it if we're not tracking copies
- if self.diffopts().git:
- for dst in aaa:
- f = repo.file(dst)
- src = f.renamed(man[dst])
- if src:
- copies[src[0]] = copies.get(dst, [])
- if dst in a:
- copies[src[0]].append(dst)
- # we can't copy a file created by the patch itself
- if dst in copies:
- del copies[dst]
- for src, dsts in copies.iteritems():
- for dst in dsts:
- repo.dirstate.copy(src, dst)
- for f in r:
- repo.dirstate.remove(f)
- # if the patch excludes a modified file, mark that file with mtime=0
- # so status can see it.
- mm = []
- for i in xrange(len(m)-1, -1, -1):
- if not matchfn(m[i]):
- mm.append(m[i])
- del m[i]
- for f in m:
- repo.dirstate.normal(f)
- for f in mm:
- repo.dirstate.normaldirty(f)
- for f in forget:
- repo.dirstate.forget(f)
+ repo.dirstate.setparents(*cparents)
+ copies = {}
+ for dst in a:
+ src = repo.dirstate.copied(dst)
+ if src is None:
+ continue
+ copies.setdefault(src, []).append(dst)
+ repo.dirstate.add(dst)
+ # remember the copies between patchparent and tip
+ # this may be slow, so don't do it if we're not tracking copies
+ if self.diffopts().git:
+ for dst in aaa:
+ f = repo.file(dst)
+ src = f.renamed(man[dst])
+ if src:
+ copies[src[0]] = copies.get(dst, [])
+ if dst in a:
+ copies[src[0]].append(dst)
+ # we can't copy a file created by the patch itself
+ if dst in copies:
+ del copies[dst]
+ for src, dsts in copies.iteritems():
+ for dst in dsts:
+ repo.dirstate.copy(src, dst)
+ for f in r:
+ repo.dirstate.remove(f)
+ # if the patch excludes a modified file, mark that
+ # file with mtime=0 so status can see it.
+ mm = []
+ for i in xrange(len(m)-1, -1, -1):
+ if not matchfn(m[i]):
+ mm.append(m[i])
+ del m[i]
+ for f in m:
+ repo.dirstate.normal(f)
+ for f in mm:
+ repo.dirstate.normaldirty(f)
+ for f in forget:
+ repo.dirstate.forget(f)
- if not msg:
- if not message:
- message = "[mq]: %s\n" % patchfn
+ if not msg:
+ if not message:
+ message = "[mq]: %s\n" % patchfn
+ else:
+ message = "\n".join(message)
else:
- message = "\n".join(message)
- else:
- message = msg
+ message = msg
- self.strip(repo, top, update=False, backup='strip', wlock=wlock)
- n = repo.commit(filelist, message, changes[1], match=matchfn,
- force=1, wlock=wlock)
- self.applied[-1] = statusentry(revlog.hex(n), patchfn)
- self.applied_dirty = 1
- self.removeundo(repo)
- else:
- self.printdiff(repo, patchparent, fp=patchf)
- patchf.close()
- added = repo.status()[1]
- for a in added:
- f = repo.wjoin(a)
- try:
- os.unlink(f)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- try: os.removedirs(os.path.dirname(f))
- except: pass
- # forget the file copies in the dirstate
- # push should readd the files later on
- repo.dirstate.forget(a)
- self.pop(repo, force=True, wlock=wlock)
- self.push(repo, force=True, wlock=wlock)
+ self.strip(repo, top, update=False,
+ backup='strip', wlock=wlock)
+ n = repo.commit(filelist, message, changes[1], match=matchfn,
+ force=1, wlock=wlock)
+ self.applied[-1] = statusentry(revlog.hex(n), patchfn)
+ self.applied_dirty = 1
+ self.removeundo(repo)
+ else:
+ self.printdiff(repo, patchparent, fp=patchf)
+ patchf.close()
+ added = repo.status()[1]
+ for a in added:
+ f = repo.wjoin(a)
+ try:
+ os.unlink(f)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ try: os.removedirs(os.path.dirname(f))
+ except: pass
+ # forget the file copies in the dirstate
+                    # push should re-add the files later on
+ repo.dirstate.forget(a)
+ self.pop(repo, force=True, wlock=wlock)
+ self.push(repo, force=True, wlock=wlock)
+ finally:
+ del wlock
def init(self, repo, create=False):
if not create and os.path.isdir(self.path):
@@ -1872,10 +1896,13 @@
r = q.qrepo()
if r:
wlock = r.wlock()
- if r.dirstate[name] == 'r':
- r.undelete([name], wlock)
- r.copy(patch, name, wlock)
- r.remove([patch], False, wlock)
+ try:
+ if r.dirstate[name] == 'r':
+ r.undelete([name], wlock)
+ r.copy(patch, name, wlock)
+ r.remove([patch], False, wlock)
+ finally:
+ del wlock
q.save_dirty()
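
Note: several of the mq.py hunks above combine the lock idiom with a transaction.
queue.apply() now sets lock = tr = None up front, acquires wlock, lock and the
transaction inside the outer try, aborts the transaction and invalidates the
repository if _apply() fails, and unconditionally drops all references in the
outer finally. A simplified sketch of that shape (FakeTransaction, FakeRepo and
guarded_apply are illustrative stand-ins, not mq internals):

    class FakeTransaction(object):
        def __init__(self):
            self.state = "open"
        def close(self):
            self.state = "closed"
        def abort(self):
            self.state = "aborted"

    class FakeRepo(object):
        def wlock(self): return object()
        def lock(self): return object()
        def transaction(self): return FakeTransaction()
        def invalidate(self): pass

    def guarded_apply(repo, fail=False):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                if fail:
                    raise IOError("simulated patch failure")
                tr.close()            # commit on success
                return tr.state
            except:
                try:
                    tr.abort()        # roll back partial writes
                finally:
                    repo.invalidate()
                raise
        finally:
            # locks and transaction are dropped whether we succeeded or not
            del lock, wlock, tr

    print(guarded_apply(FakeRepo()))           # closed
    try:
        guarded_apply(FakeRepo(), fail=True)
    except IOError:
        print("apply failed, transaction aborted")
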
--- a/hgext/transplant.py Sat Jul 21 16:02:09 2007 -0500
+++ b/hgext/transplant.py Sat Jul 21 16:02:10 2007 -0500
@@ -96,9 +96,10 @@
diffopts = patch.diffopts(self.ui, opts)
diffopts.git = True
- wlock = repo.wlock()
- lock = repo.lock()
+ lock = wlock = None
try:
+ wlock = repo.wlock()
+ lock = repo.lock()
for rev in revs:
node = revmap[rev]
revstr = '%s:%s' % (rev, revlog.short(node))
@@ -166,6 +167,7 @@
finally:
self.saveseries(revmap, merges)
self.transplants.write()
+ del lock, wlock
def filter(self, filter, changelog, patchfile):
'''arbitrarily rewrite changeset before applying it'''
@@ -272,20 +274,25 @@
extra = {'transplant_source': node}
wlock = repo.wlock()
- p1, p2 = repo.dirstate.parents()
- if p1 != parents[0]:
- raise util.Abort(_('working dir not at transplant parent %s') %
- revlog.hex(parents[0]))
- if merge:
- repo.dirstate.setparents(p1, parents[1])
- n = repo.commit(None, message, user, date, wlock=wlock, extra=extra)
- if not n:
- raise util.Abort(_('commit failed'))
- if not merge:
- self.transplants.set(n, node)
- self.unlog()
+ try:
+ p1, p2 = repo.dirstate.parents()
+ if p1 != parents[0]:
+ raise util.Abort(
+ _('working dir not at transplant parent %s') %
+ revlog.hex(parents[0]))
+ if merge:
+ repo.dirstate.setparents(p1, parents[1])
+ n = repo.commit(None, message, user, date, wlock=wlock,
+ extra=extra)
+ if not n:
+ raise util.Abort(_('commit failed'))
+ if not merge:
+ self.transplants.set(n, node)
+ self.unlog()
- return n, node
+ return n, node
+ finally:
+ del wlock
def readseries(self):
nodes = []
--- a/mercurial/commands.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/commands.py Sat Jul 21 16:02:10 2007 -0500
@@ -676,7 +676,10 @@
before that, see hg revert.
"""
wlock = repo.wlock(False)
- errs, copied = docopy(ui, repo, pats, opts, wlock)
+ try:
+ errs, copied = docopy(ui, repo, pats, opts, wlock)
+ finally:
+ del wlock
return errs
def debugancestor(ui, index, rev1, rev2):
@@ -713,7 +716,10 @@
ctx = repo.changectx(rev)
files = ctx.manifest()
wlock = repo.wlock()
- repo.dirstate.rebuild(rev, files)
+ try:
+ repo.dirstate.rebuild(rev, files)
+ finally:
+ del wlock
def debugcheckstate(ui, repo):
"""validate the correctness of the current dirstate"""
@@ -782,7 +788,7 @@
try:
repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
finally:
- wlock.release()
+ del wlock
def debugstate(ui, repo):
"""show the contents of the current dirstate"""
@@ -1581,70 +1587,76 @@
d = opts["base"]
strip = opts["strip"]
-
- wlock = repo.wlock()
- lock = repo.lock()
-
- for p in patches:
- pf = os.path.join(d, p)
-
- if pf == '-':
- ui.status(_("applying patch from stdin\n"))
- tmpname, message, user, date, branch, nodeid, p1, p2 = patch.extract(ui, sys.stdin)
- else:
- ui.status(_("applying %s\n") % p)
- tmpname, message, user, date, branch, nodeid, p1, p2 = patch.extract(ui, file(pf, 'rb'))
-
- if tmpname is None:
- raise util.Abort(_('no diffs found'))
-
- try:
- cmdline_message = cmdutil.logmessage(opts)
- if cmdline_message:
- # pickup the cmdline msg
- message = cmdline_message
- elif message:
- # pickup the patch msg
- message = message.strip()
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ for p in patches:
+ pf = os.path.join(d, p)
+
+ if pf == '-':
+ ui.status(_("applying patch from stdin\n"))
+ data = patch.extract(ui, sys.stdin)
else:
- # launch the editor
- message = None
- ui.debug(_('message:\n%s\n') % message)
-
- wp = repo.workingctx().parents()
- if opts.get('exact'):
- if not nodeid or not p1:
- raise util.Abort(_('not a mercurial patch'))
- p1 = repo.lookup(p1)
- p2 = repo.lookup(p2 or hex(nullid))
-
- if p1 != wp[0].node():
- hg.clean(repo, p1, wlock=wlock)
- repo.dirstate.setparents(p1, p2)
- elif p2:
- try:
+ ui.status(_("applying %s\n") % p)
+ data = patch.extract(ui, file(pf, 'rb'))
+
+ tmpname, message, user, date, branch, nodeid, p1, p2 = data
+
+ if tmpname is None:
+ raise util.Abort(_('no diffs found'))
+
+ try:
+ cmdline_message = cmdutil.logmessage(opts)
+ if cmdline_message:
+                    # pick up the cmdline msg
+ message = cmdline_message
+ elif message:
+                    # pick up the patch msg
+ message = message.strip()
+ else:
+ # launch the editor
+ message = None
+ ui.debug(_('message:\n%s\n') % message)
+
+ wp = repo.workingctx().parents()
+ if opts.get('exact'):
+ if not nodeid or not p1:
+ raise util.Abort(_('not a mercurial patch'))
p1 = repo.lookup(p1)
- p2 = repo.lookup(p2)
- if p1 == wp[0].node():
- repo.dirstate.setparents(p1, p2)
- except hg.RepoError:
- pass
- if opts.get('exact') or opts.get('import_branch'):
- repo.dirstate.setbranch(branch or 'default')
-
- files = {}
- try:
- fuzz = patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
- files=files)
+ p2 = repo.lookup(p2 or hex(nullid))
+
+ if p1 != wp[0].node():
+ hg.clean(repo, p1, wlock=wlock)
+ repo.dirstate.setparents(p1, p2)
+ elif p2:
+ try:
+ p1 = repo.lookup(p1)
+ p2 = repo.lookup(p2)
+ if p1 == wp[0].node():
+ repo.dirstate.setparents(p1, p2)
+ except hg.RepoError:
+ pass
+ if opts.get('exact') or opts.get('import_branch'):
+ repo.dirstate.setbranch(branch or 'default')
+
+ files = {}
+ try:
+ fuzz = patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
+ files=files)
+ finally:
+ files = patch.updatedir(ui, repo, files, wlock=wlock)
+ n = repo.commit(files, message, user, date, wlock=wlock,
+ lock=lock)
+ if opts.get('exact'):
+ if hex(n) != nodeid:
+ repo.rollback(wlock=wlock, lock=lock)
+ raise util.Abort(_('patch is damaged' +
+ ' or loses information'))
finally:
- files = patch.updatedir(ui, repo, files, wlock=wlock)
- n = repo.commit(files, message, user, date, wlock=wlock, lock=lock)
- if opts.get('exact'):
- if hex(n) != nodeid:
- repo.rollback(wlock=wlock, lock=lock)
- raise util.Abort(_('patch is damaged or loses information'))
- finally:
- os.unlink(tmpname)
+ os.unlink(tmpname)
+ finally:
+ del wlock, lock
def incoming(ui, repo, source="default", **opts):
"""show new changesets found in source
@@ -2248,15 +2260,18 @@
before that, see hg revert.
"""
wlock = repo.wlock(False)
- errs, copied = docopy(ui, repo, pats, opts, wlock)
- names = []
- for abs, rel, exact in copied:
- if ui.verbose or not exact:
- ui.status(_('removing %s\n') % rel)
- names.append(abs)
- if not opts.get('dry_run'):
- repo.remove(names, True, wlock=wlock)
- return errs
+ try:
+ errs, copied = docopy(ui, repo, pats, opts, wlock)
+ names = []
+ for abs, rel, exact in copied:
+ if ui.verbose or not exact:
+ ui.status(_('removing %s\n') % rel)
+ names.append(abs)
+ if not opts.get('dry_run'):
+ repo.remove(names, True, wlock=wlock)
+ return errs
+ finally:
+ del wlock
def revert(ui, repo, *pats, **opts):
"""revert files or dirs to their states as of some revision
@@ -2310,8 +2325,6 @@
else:
pmf = None
- wlock = repo.wlock()
-
# need all matching names in dirstate and manifest of target rev,
# so have to walk both. do not print errors if files exist in one
# but not other.
@@ -2319,113 +2332,116 @@
names = {}
target_only = {}
- # walk dirstate.
-
- for src, abs, rel, exact in cmdutil.walk(repo, pats, opts,
- badmatch=mf.has_key):
- names[abs] = (rel, exact)
- if src == 'b':
- target_only[abs] = True
-
- # walk target manifest.
-
- def badmatch(path):
- if path in names:
- return True
- path_ = path + '/'
- for f in names:
- if f.startswith(path_):
+ wlock = repo.wlock()
+ try:
+ # walk dirstate.
+ for src, abs, rel, exact in cmdutil.walk(repo, pats, opts,
+ badmatch=mf.has_key):
+ names[abs] = (rel, exact)
+ if src == 'b':
+ target_only[abs] = True
+
+ # walk target manifest.
+
+ def badmatch(path):
+ if path in names:
return True
- return False
-
- for src, abs, rel, exact in cmdutil.walk(repo, pats, opts, node=node,
- badmatch=badmatch):
- if abs in names or src == 'b':
- continue
- names[abs] = (rel, exact)
- target_only[abs] = True
-
- changes = repo.status(match=names.has_key, wlock=wlock)[:5]
- modified, added, removed, deleted, unknown = map(dict.fromkeys, changes)
-
- revert = ([], _('reverting %s\n'))
- add = ([], _('adding %s\n'))
- remove = ([], _('removing %s\n'))
- forget = ([], _('forgetting %s\n'))
- undelete = ([], _('undeleting %s\n'))
- update = {}
-
- disptable = (
- # dispatch table:
- # file state
- # action if in target manifest
- # action if not in target manifest
- # make backup if in target manifest
- # make backup if not in target manifest
- (modified, revert, remove, True, True),
- (added, revert, forget, True, False),
- (removed, undelete, None, False, False),
- (deleted, revert, remove, False, False),
- (unknown, add, None, True, False),
- (target_only, add, None, False, False),
- )
-
- entries = names.items()
- entries.sort()
-
- for abs, (rel, exact) in entries:
- mfentry = mf.get(abs)
- target = repo.wjoin(abs)
- def handle(xlist, dobackup):
- xlist[0].append(abs)
- update[abs] = 1
- if dobackup and not opts['no_backup'] and util.lexists(target):
- bakname = "%s.orig" % rel
- ui.note(_('saving current version of %s as %s\n') %
- (rel, bakname))
- if not opts.get('dry_run'):
- util.copyfile(target, bakname)
- if ui.verbose or not exact:
- ui.status(xlist[1] % rel)
- for table, hitlist, misslist, backuphit, backupmiss in disptable:
- if abs not in table: continue
- # file has changed in dirstate
- if mfentry:
- handle(hitlist, backuphit)
- elif misslist is not None:
- handle(misslist, backupmiss)
+ path_ = path + '/'
+ for f in names:
+ if f.startswith(path_):
+ return True
+ return False
+
+ for src, abs, rel, exact in cmdutil.walk(repo, pats, opts, node=node,
+ badmatch=badmatch):
+ if abs in names or src == 'b':
+ continue
+ names[abs] = (rel, exact)
+ target_only[abs] = True
+
+ changes = repo.status(match=names.has_key, wlock=wlock)[:5]
+ modified, added, removed, deleted, unknown = map(dict.fromkeys, changes)
+
+ revert = ([], _('reverting %s\n'))
+ add = ([], _('adding %s\n'))
+ remove = ([], _('removing %s\n'))
+ forget = ([], _('forgetting %s\n'))
+ undelete = ([], _('undeleting %s\n'))
+ update = {}
+
+ disptable = (
+ # dispatch table:
+ # file state
+ # action if in target manifest
+ # action if not in target manifest
+ # make backup if in target manifest
+ # make backup if not in target manifest
+ (modified, revert, remove, True, True),
+ (added, revert, forget, True, False),
+ (removed, undelete, None, False, False),
+ (deleted, revert, remove, False, False),
+ (unknown, add, None, True, False),
+ (target_only, add, None, False, False),
+ )
+
+ entries = names.items()
+ entries.sort()
+
+ for abs, (rel, exact) in entries:
+ mfentry = mf.get(abs)
+ target = repo.wjoin(abs)
+ def handle(xlist, dobackup):
+ xlist[0].append(abs)
+ update[abs] = 1
+ if dobackup and not opts['no_backup'] and util.lexists(target):
+ bakname = "%s.orig" % rel
+ ui.note(_('saving current version of %s as %s\n') %
+ (rel, bakname))
+ if not opts.get('dry_run'):
+ util.copyfile(target, bakname)
+ if ui.verbose or not exact:
+ ui.status(xlist[1] % rel)
+ for table, hitlist, misslist, backuphit, backupmiss in disptable:
+ if abs not in table: continue
+ # file has changed in dirstate
+ if mfentry:
+ handle(hitlist, backuphit)
+ elif misslist is not None:
+ handle(misslist, backupmiss)
+ else:
+ if exact: ui.warn(_('file not managed: %s\n') % rel)
+ break
else:
- if exact: ui.warn(_('file not managed: %s\n') % rel)
- break
- else:
- # file has not changed in dirstate
- if node == parent:
- if exact: ui.warn(_('no changes needed to %s\n') % rel)
- continue
- if pmf is None:
- # only need parent manifest in this unlikely case,
- # so do not read by default
- pmf = repo.changectx(parent).manifest()
- if abs in pmf:
- if mfentry:
- # if version of file is same in parent and target
- # manifests, do nothing
- if pmf[abs] != mfentry:
- handle(revert, False)
- else:
- handle(remove, False)
-
- if not opts.get('dry_run'):
- for f in forget[0]:
- repo.dirstate.forget(f)
- r = hg.revert(repo, node, update.has_key, wlock)
- for f in add[0]:
- repo.dirstate.add(f)
- for f in undelete[0]:
- repo.dirstate.normal(f)
- for f in remove[0]:
- repo.dirstate.remove(f)
- return r
+ # file has not changed in dirstate
+ if node == parent:
+ if exact: ui.warn(_('no changes needed to %s\n') % rel)
+ continue
+ if pmf is None:
+ # only need parent manifest in this unlikely case,
+ # so do not read by default
+ pmf = repo.changectx(parent).manifest()
+ if abs in pmf:
+ if mfentry:
+ # if version of file is same in parent and target
+ # manifests, do nothing
+ if pmf[abs] != mfentry:
+ handle(revert, False)
+ else:
+ handle(remove, False)
+
+ if not opts.get('dry_run'):
+ for f in forget[0]:
+ repo.dirstate.forget(f)
+ r = hg.revert(repo, node, update.has_key, wlock)
+ for f in add[0]:
+ repo.dirstate.add(f)
+ for f in undelete[0]:
+ repo.dirstate.normal(f)
+ for f in remove[0]:
+ repo.dirstate.remove(f)
+ return r
+ finally:
+ del wlock
def rollback(ui, repo):
"""roll back the last transaction in this repository
--- a/mercurial/hg.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/hg.py Sat Jul 21 16:02:10 2007 -0500
@@ -130,103 +130,99 @@
if self.dir_:
self.rmtree(self.dir_, True)
- dir_cleanup = None
- if islocal(dest):
- dir_cleanup = DirCleanup(dest)
+ src_lock = dest_lock = dir_cleanup = None
+ try:
+ if islocal(dest):
+ dir_cleanup = DirCleanup(dest)
- abspath = origsource
- copy = False
- if src_repo.local() and islocal(dest):
- abspath = os.path.abspath(origsource)
- copy = not pull and not rev
+ abspath = origsource
+ copy = False
+ if src_repo.local() and islocal(dest):
+ abspath = os.path.abspath(origsource)
+ copy = not pull and not rev
- src_lock, dest_lock = None, None
- if copy:
- try:
- # we use a lock here because if we race with commit, we
- # can end up with extra data in the cloned revlogs that's
- # not pointed to by changesets, thus causing verify to
- # fail
- src_lock = src_repo.lock()
- except lock.LockException:
- copy = False
+ if copy:
+ try:
+ # we use a lock here because if we race with commit, we
+ # can end up with extra data in the cloned revlogs that's
+ # not pointed to by changesets, thus causing verify to
+ # fail
+ src_lock = src_repo.lock()
+ except lock.LockException:
+ copy = False
- if copy:
- def force_copy(src, dst):
- try:
- util.copyfiles(src, dst)
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
+ if copy:
+ def force_copy(src, dst):
+ try:
+ util.copyfiles(src, dst)
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
- src_store = os.path.realpath(src_repo.spath)
- if not os.path.exists(dest):
- os.mkdir(dest)
- dest_path = os.path.realpath(os.path.join(dest, ".hg"))
- os.mkdir(dest_path)
- if src_repo.spath != src_repo.path:
- dest_store = os.path.join(dest_path, "store")
- os.mkdir(dest_store)
- else:
- dest_store = dest_path
- # copy the requires file
- force_copy(src_repo.join("requires"),
- os.path.join(dest_path, "requires"))
- # we lock here to avoid premature writing to the target
- dest_lock = lock.lock(os.path.join(dest_store, "lock"))
+ src_store = os.path.realpath(src_repo.spath)
+ if not os.path.exists(dest):
+ os.mkdir(dest)
+ dest_path = os.path.realpath(os.path.join(dest, ".hg"))
+ os.mkdir(dest_path)
+ if src_repo.spath != src_repo.path:
+ dest_store = os.path.join(dest_path, "store")
+ os.mkdir(dest_store)
+ else:
+ dest_store = dest_path
+ # copy the requires file
+ force_copy(src_repo.join("requires"),
+ os.path.join(dest_path, "requires"))
+ # we lock here to avoid premature writing to the target
+ dest_lock = lock.lock(os.path.join(dest_store, "lock"))
- files = ("data",
- "00manifest.d", "00manifest.i",
- "00changelog.d", "00changelog.i")
- for f in files:
- src = os.path.join(src_store, f)
- dst = os.path.join(dest_store, f)
- force_copy(src, dst)
+ files = ("data",
+ "00manifest.d", "00manifest.i",
+ "00changelog.d", "00changelog.i")
+ for f in files:
+ src = os.path.join(src_store, f)
+ dst = os.path.join(dest_store, f)
+ force_copy(src, dst)
+
+ # we need to re-init the repo after manually copying the data
+ # into it
+ dest_repo = repository(ui, dest)
+
+ else:
+ dest_repo = repository(ui, dest, create=True)
- # we need to re-init the repo after manually copying the data
- # into it
- dest_repo = repository(ui, dest)
-
- else:
- dest_repo = repository(ui, dest, create=True)
+ revs = None
+ if rev:
+ if 'lookup' not in src_repo.capabilities:
+ raise util.Abort(_("src repository does not support revision "
+ "lookup and so doesn't support clone by "
+ "revision"))
+ revs = [src_repo.lookup(r) for r in rev]
- revs = None
- if rev:
- if 'lookup' not in src_repo.capabilities:
- raise util.Abort(_("src repository does not support revision "
- "lookup and so doesn't support clone by "
- "revision"))
- revs = [src_repo.lookup(r) for r in rev]
+ if dest_repo.local():
+ dest_repo.clone(src_repo, heads=revs, stream=stream)
+ elif src_repo.local():
+ src_repo.push(dest_repo, revs=revs)
+ else:
+ raise util.Abort(_("clone from remote to remote not supported"))
if dest_repo.local():
- dest_repo.clone(src_repo, heads=revs, stream=stream)
- elif src_repo.local():
- src_repo.push(dest_repo, revs=revs)
- else:
- raise util.Abort(_("clone from remote to remote not supported"))
-
- if src_lock:
- src_lock.release()
+ fp = dest_repo.opener("hgrc", "w", text=True)
+ fp.write("[paths]\n")
+ fp.write("default = %s\n" % abspath)
+ fp.close()
- if dest_repo.local():
- fp = dest_repo.opener("hgrc", "w", text=True)
- fp.write("[paths]\n")
- fp.write("default = %s\n" % abspath)
- fp.close()
-
- if dest_lock:
- dest_lock.release()
+ if update:
+ try:
+ checkout = dest_repo.lookup("default")
+ except:
+ checkout = dest_repo.changelog.tip()
+ _update(dest_repo, checkout)
+ if dir_cleanup:
+ dir_cleanup.close()
- if update:
- try:
- checkout = dest_repo.lookup("default")
- except:
- checkout = dest_repo.changelog.tip()
- _update(dest_repo, checkout)
- if dir_cleanup:
- dir_cleanup.close()
-
- return src_repo, dest_repo
+ return src_repo, dest_repo
+ finally:
+ del src_lock, dest_lock, dir_cleanup
def _showstats(repo, stats):
stats = ((stats[0], _("updated")),
--- a/mercurial/hgweb/hgweb_mod.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/hgweb/hgweb_mod.py Sat Jul 21 16:02:10 2007 -0500
@@ -1168,7 +1168,7 @@
req.write('%d\n' % ret)
req.write(val)
finally:
- lock.release()
+ del lock
except (OSError, IOError), inst:
req.write('0\n')
filename = getattr(inst, 'filename', '')
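
Note: the localrepo.py changes below extend the same idea to locks that may be
passed in by a caller; pull(), for example, drops its old mylock flag. This works
because del only removes the current frame's name for the lock: a lock the caller
handed in still has the caller's reference and is not released, while a lock
acquired locally loses its only reference and is released on the spot (promptly
under CPython's reference counting). A small sketch, again with illustrative
stand-ins rather than Mercurial APIs:

    class FakeLock(object):
        def __init__(self, name):
            self.name = name
            self.released = False
        def release(self):
            self.released = True
            print("released %s" % self.name)
        def __del__(self):
            if not self.released:
                self.release()

    def pull_like(lock=None):
        try:
            if lock is None:
                lock = FakeLock("local lock")
            # findincoming / addchangegroup would run here
        finally:
            # drops only this frame's reference to the lock
            del lock

    pull_like()                       # prints: released local lock
    caller_lock = FakeLock("caller lock")
    pull_like(caller_lock)            # prints nothing
    print(caller_lock.released)       # False: the caller still holds it
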
--- a/mercurial/localrepo.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/localrepo.py Sat Jul 21 16:02:10 2007 -0500
@@ -516,28 +516,34 @@
def recover(self):
l = self.lock()
- if os.path.exists(self.sjoin("journal")):
- self.ui.status(_("rolling back interrupted transaction\n"))
- transaction.rollback(self.sopener, self.sjoin("journal"))
- self.invalidate()
- return True
- else:
- self.ui.warn(_("no interrupted transaction available\n"))
- return False
+ try:
+ if os.path.exists(self.sjoin("journal")):
+ self.ui.status(_("rolling back interrupted transaction\n"))
+ transaction.rollback(self.sopener, self.sjoin("journal"))
+ self.invalidate()
+ return True
+ else:
+ self.ui.warn(_("no interrupted transaction available\n"))
+ return False
+ finally:
+ del l
def rollback(self, wlock=None, lock=None):
- if not wlock:
- wlock = self.wlock()
- if not lock:
- lock = self.lock()
- if os.path.exists(self.sjoin("undo")):
- self.ui.status(_("rolling back last transaction\n"))
- transaction.rollback(self.sopener, self.sjoin("undo"))
- util.rename(self.join("undo.dirstate"), self.join("dirstate"))
- self.invalidate()
- self.dirstate.invalidate()
- else:
- self.ui.warn(_("no rollback information available\n"))
+ try:
+ if not wlock:
+ wlock = self.wlock()
+ if not lock:
+ lock = self.lock()
+ if os.path.exists(self.sjoin("undo")):
+ self.ui.status(_("rolling back last transaction\n"))
+ transaction.rollback(self.sopener, self.sjoin("undo"))
+ util.rename(self.join("undo.dirstate"), self.join("dirstate"))
+ self.invalidate()
+ self.dirstate.invalidate()
+ else:
+ self.ui.warn(_("no rollback information available\n"))
+ finally:
+ del wlock, lock
def invalidate(self):
for a in "changelog manifest".split():
@@ -639,164 +645,169 @@
def commit(self, files=None, text="", user=None, date=None,
match=util.always, force=False, lock=None, wlock=None,
force_editor=False, p1=None, p2=None, extra={}):
-
- commit = []
- remove = []
- changed = []
- use_dirstate = (p1 is None) # not rawcommit
- extra = extra.copy()
+ tr = None
+ try:
+ commit = []
+ remove = []
+ changed = []
+ use_dirstate = (p1 is None) # not rawcommit
+ extra = extra.copy()
- if use_dirstate:
- if files:
- for f in files:
- s = self.dirstate[f]
- if s in 'nma':
- commit.append(f)
- elif s == 'r':
- remove.append(f)
- else:
- self.ui.warn(_("%s not tracked!\n") % f)
+ if use_dirstate:
+ if files:
+ for f in files:
+ s = self.dirstate[f]
+ if s in 'nma':
+ commit.append(f)
+ elif s == 'r':
+ remove.append(f)
+ else:
+ self.ui.warn(_("%s not tracked!\n") % f)
+ else:
+ changes = self.status(match=match)[:5]
+ modified, added, removed, deleted, unknown = changes
+ commit = modified + added
+ remove = removed
else:
- changes = self.status(match=match)[:5]
- modified, added, removed, deleted, unknown = changes
- commit = modified + added
- remove = removed
- else:
- commit = files
+ commit = files
- if use_dirstate:
- p1, p2 = self.dirstate.parents()
- update_dirstate = True
- else:
- p1, p2 = p1, p2 or nullid
- update_dirstate = (self.dirstate.parents()[0] == p1)
+ if use_dirstate:
+ p1, p2 = self.dirstate.parents()
+ update_dirstate = True
+ else:
+ p1, p2 = p1, p2 or nullid
+ update_dirstate = (self.dirstate.parents()[0] == p1)
- c1 = self.changelog.read(p1)
- c2 = self.changelog.read(p2)
- m1 = self.manifest.read(c1[0]).copy()
- m2 = self.manifest.read(c2[0])
+ c1 = self.changelog.read(p1)
+ c2 = self.changelog.read(p2)
+ m1 = self.manifest.read(c1[0]).copy()
+ m2 = self.manifest.read(c2[0])
- if use_dirstate:
- branchname = self.workingctx().branch()
- try:
- branchname = branchname.decode('UTF-8').encode('UTF-8')
- except UnicodeDecodeError:
- raise util.Abort(_('branch name not in UTF-8!'))
- else:
- branchname = ""
+ if use_dirstate:
+ branchname = self.workingctx().branch()
+ try:
+ branchname = branchname.decode('UTF-8').encode('UTF-8')
+ except UnicodeDecodeError:
+ raise util.Abort(_('branch name not in UTF-8!'))
+ else:
+ branchname = ""
- if use_dirstate:
- oldname = c1[5].get("branch") # stored in UTF-8
- if (not commit and not remove and not force and p2 == nullid
- and branchname == oldname):
- self.ui.status(_("nothing changed\n"))
- return None
+ if use_dirstate:
+ oldname = c1[5].get("branch") # stored in UTF-8
+ if (not commit and not remove and not force and p2 == nullid
+ and branchname == oldname):
+ self.ui.status(_("nothing changed\n"))
+ return None
- xp1 = hex(p1)
- if p2 == nullid: xp2 = ''
- else: xp2 = hex(p2)
+ xp1 = hex(p1)
+ if p2 == nullid: xp2 = ''
+ else: xp2 = hex(p2)
- self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
+ self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
- if not wlock:
- wlock = self.wlock()
- if not lock:
- lock = self.lock()
- tr = self.transaction()
+ if not wlock:
+ wlock = self.wlock()
+ if not lock:
+ lock = self.lock()
+ tr = self.transaction()
- # check in files
- new = {}
- linkrev = self.changelog.count()
- commit.sort()
- is_exec = util.execfunc(self.root, m1.execf)
- is_link = util.linkfunc(self.root, m1.linkf)
- for f in commit:
- self.ui.note(f + "\n")
- try:
- new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
- new_exec = is_exec(f)
- new_link = is_link(f)
- if not changed or changed[-1] != f:
- # mention the file in the changelog if some flag changed,
- # even if there was no content change.
- old_exec = m1.execf(f)
- old_link = m1.linkf(f)
- if old_exec != new_exec or old_link != new_link:
- changed.append(f)
- m1.set(f, new_exec, new_link)
- except (OSError, IOError):
- if use_dirstate:
- self.ui.warn(_("trouble committing %s!\n") % f)
- raise
- else:
- remove.append(f)
+ # check in files
+ new = {}
+ linkrev = self.changelog.count()
+ commit.sort()
+ is_exec = util.execfunc(self.root, m1.execf)
+ is_link = util.linkfunc(self.root, m1.linkf)
+ for f in commit:
+ self.ui.note(f + "\n")
+ try:
+ new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
+ new_exec = is_exec(f)
+ new_link = is_link(f)
+ if not changed or changed[-1] != f:
+ # mention the file in the changelog if some
+ # flag changed, even if there was no content
+ # change.
+ old_exec = m1.execf(f)
+ old_link = m1.linkf(f)
+ if old_exec != new_exec or old_link != new_link:
+ changed.append(f)
+ m1.set(f, new_exec, new_link)
+ except (OSError, IOError):
+ if use_dirstate:
+ self.ui.warn(_("trouble committing %s!\n") % f)
+ raise
+ else:
+ remove.append(f)
- # update manifest
- m1.update(new)
- remove.sort()
- removed = []
+ # update manifest
+ m1.update(new)
+ remove.sort()
+ removed = []
- for f in remove:
- if f in m1:
- del m1[f]
- removed.append(f)
- elif f in m2:
- removed.append(f)
- mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
+ for f in remove:
+ if f in m1:
+ del m1[f]
+ removed.append(f)
+ elif f in m2:
+ removed.append(f)
+ mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
+ (new, removed))
- # add changeset
- new = new.keys()
- new.sort()
+ # add changeset
+ new = new.keys()
+ new.sort()
- user = user or self.ui.username()
- if not text or force_editor:
- edittext = []
- if text:
- edittext.append(text)
- edittext.append("")
- edittext.append("HG: user: %s" % user)
- if p2 != nullid:
- edittext.append("HG: branch merge")
- if branchname:
- edittext.append("HG: branch %s" % util.tolocal(branchname))
- edittext.extend(["HG: changed %s" % f for f in changed])
- edittext.extend(["HG: removed %s" % f for f in removed])
- if not changed and not remove:
- edittext.append("HG: no files changed")
- edittext.append("")
- # run editor in the repository root
- olddir = os.getcwd()
- os.chdir(self.root)
- text = self.ui.edit("\n".join(edittext), user)
- os.chdir(olddir)
+ user = user or self.ui.username()
+ if not text or force_editor:
+ edittext = []
+ if text:
+ edittext.append(text)
+ edittext.append("")
+ edittext.append("HG: user: %s" % user)
+ if p2 != nullid:
+ edittext.append("HG: branch merge")
+ if branchname:
+ edittext.append("HG: branch %s" % util.tolocal(branchname))
+ edittext.extend(["HG: changed %s" % f for f in changed])
+ edittext.extend(["HG: removed %s" % f for f in removed])
+ if not changed and not remove:
+ edittext.append("HG: no files changed")
+ edittext.append("")
+ # run editor in the repository root
+ olddir = os.getcwd()
+ os.chdir(self.root)
+ text = self.ui.edit("\n".join(edittext), user)
+ os.chdir(olddir)
- lines = [line.rstrip() for line in text.rstrip().splitlines()]
- while lines and not lines[0]:
- del lines[0]
- if not lines:
- return None
- text = '\n'.join(lines)
- if branchname:
- extra["branch"] = branchname
- n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
- user, date, extra)
- self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
- parent2=xp2)
- tr.close()
+ lines = [line.rstrip() for line in text.rstrip().splitlines()]
+ while lines and not lines[0]:
+ del lines[0]
+ if not lines:
+ return None
+ text = '\n'.join(lines)
+ if branchname:
+ extra["branch"] = branchname
+ n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
+ user, date, extra)
+ self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
+ parent2=xp2)
+ tr.close()
- if self.branchcache and "branch" in extra:
- self.branchcache[util.tolocal(extra["branch"])] = n
+ if self.branchcache and "branch" in extra:
+ self.branchcache[util.tolocal(extra["branch"])] = n
- if use_dirstate or update_dirstate:
- self.dirstate.setparents(n)
- if use_dirstate:
- for f in new:
- self.dirstate.normal(f)
- for f in removed:
- self.dirstate.forget(f)
+ if use_dirstate or update_dirstate:
+ self.dirstate.setparents(n)
+ if use_dirstate:
+ for f in new:
+ self.dirstate.normal(f)
+ for f in removed:
+ self.dirstate.forget(f)
- self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
- return n
+ self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
+ return n
+ finally:
+ del lock, wlock, tr
def walk(self, node=None, files=[], match=util.always, badmatch=None):
'''
@@ -895,18 +906,18 @@
# update dirstate for files that are actually clean
if fixup:
- cleanup = False
- if not wlock:
- try:
- wlock = self.wlock(False)
- cleanup = True
- except lock.LockException:
- pass
- if wlock:
- for f in fixup:
- self.dirstate.normal(f)
- if cleanup:
- wlock.release()
+ fixlock = wlock
+ try:
+ if not fixlock:
+ try:
+ fixlock = self.wlock(False)
+ except lock.LockException:
+ pass
+ if fixlock:
+ for f in fixup:
+ self.dirstate.normal(f)
+ finally:
+ del fixlock
else:
# we are comparing working dir against non-parent
# generate a pseudo-manifest for the working dir
@@ -954,84 +965,99 @@
return (modified, added, removed, deleted, unknown, ignored, clean)
def add(self, list, wlock=None):
- if not wlock:
- wlock = self.wlock()
- for f in list:
- p = self.wjoin(f)
- try:
- st = os.lstat(p)
- except:
- self.ui.warn(_("%s does not exist!\n") % f)
- continue
- if st.st_size > 10000000:
- self.ui.warn(_("%s: files over 10MB may cause memory and"
- " performance problems\n"
- "(use 'hg revert %s' to unadd the file)\n")
- % (f, f))
- if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
- self.ui.warn(_("%s not added: only files and symlinks "
- "supported currently\n") % f)
- elif self.dirstate[f] in 'an':
- self.ui.warn(_("%s already tracked!\n") % f)
- else:
- self.dirstate.add(f)
+ try:
+ if not wlock:
+ wlock = self.wlock()
+ for f in list:
+ p = self.wjoin(f)
+ try:
+ st = os.lstat(p)
+ except:
+ self.ui.warn(_("%s does not exist!\n") % f)
+ continue
+ if st.st_size > 10000000:
+ self.ui.warn(_("%s: files over 10MB may cause memory and"
+ " performance problems\n"
+ "(use 'hg revert %s' to unadd the file)\n")
+ % (f, f))
+ if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
+ self.ui.warn(_("%s not added: only files and symlinks "
+ "supported currently\n") % f)
+ elif self.dirstate[f] in 'an':
+ self.ui.warn(_("%s already tracked!\n") % f)
+ else:
+ self.dirstate.add(f)
+ finally:
+ del wlock
def forget(self, list, wlock=None):
- if not wlock:
- wlock = self.wlock()
- for f in list:
- if self.dirstate[f] != 'a':
- self.ui.warn(_("%s not added!\n") % f)
- else:
- self.dirstate.forget(f)
+ try:
+ if not wlock:
+ wlock = self.wlock()
+ for f in list:
+ if self.dirstate[f] != 'a':
+ self.ui.warn(_("%s not added!\n") % f)
+ else:
+ self.dirstate.forget(f)
+ finally:
+ del wlock
def remove(self, list, unlink=False, wlock=None):
- if unlink:
+ try:
+ if unlink:
+ for f in list:
+ try:
+ util.unlink(self.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ if not wlock:
+ wlock = self.wlock()
for f in list:
- try:
- util.unlink(self.wjoin(f))
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
- if not wlock:
- wlock = self.wlock()
- for f in list:
- if unlink and os.path.exists(self.wjoin(f)):
- self.ui.warn(_("%s still exists!\n") % f)
- elif self.dirstate[f] == 'a':
- self.dirstate.forget(f)
- elif f not in self.dirstate:
- self.ui.warn(_("%s not tracked!\n") % f)
- else:
- self.dirstate.remove(f)
+ if unlink and os.path.exists(self.wjoin(f)):
+ self.ui.warn(_("%s still exists!\n") % f)
+ elif self.dirstate[f] == 'a':
+ self.dirstate.forget(f)
+ elif f not in self.dirstate:
+ self.ui.warn(_("%s not tracked!\n") % f)
+ else:
+ self.dirstate.remove(f)
+ finally:
+ del wlock
def undelete(self, list, wlock=None):
- p = self.dirstate.parents()[0]
- mn = self.changelog.read(p)[0]
- m = self.manifest.read(mn)
- if not wlock:
- wlock = self.wlock()
- for f in list:
- if self.dirstate[f] != 'r':
- self.ui.warn("%s not removed!\n" % f)
- else:
- t = self.file(f).read(m[f])
- self.wwrite(f, t, m.flags(f))
- self.dirstate.normal(f)
+ try:
+ p = self.dirstate.parents()[0]
+ mn = self.changelog.read(p)[0]
+ m = self.manifest.read(mn)
+ if not wlock:
+ wlock = self.wlock()
+ for f in list:
+ if self.dirstate[f] != 'r':
+ self.ui.warn("%s not removed!\n" % f)
+ else:
+ t = self.file(f).read(m[f])
+ self.wwrite(f, t, m.flags(f))
+ self.dirstate.normal(f)
+ finally:
+ del wlock
def copy(self, source, dest, wlock=None):
- p = self.wjoin(dest)
- if not (os.path.exists(p) or os.path.islink(p)):
- self.ui.warn(_("%s does not exist!\n") % dest)
- elif not (os.path.isfile(p) or os.path.islink(p)):
- self.ui.warn(_("copy failed: %s is not a file or a "
- "symbolic link\n") % dest)
- else:
- if not wlock:
- wlock = self.wlock()
- if dest not in self.dirstate:
- self.dirstate.add(dest)
- self.dirstate.copy(source, dest)
+ try:
+ p = self.wjoin(dest)
+ if not (os.path.exists(p) or os.path.islink(p)):
+ self.ui.warn(_("%s does not exist!\n") % dest)
+ elif not (os.path.isfile(p) or os.path.islink(p)):
+ self.ui.warn(_("copy failed: %s is not a file or a "
+ "symbolic link\n") % dest)
+ else:
+ if not wlock:
+ wlock = self.wlock()
+ if dest not in self.dirstate:
+ self.dirstate.add(dest)
+ self.dirstate.copy(source, dest)
+ finally:
+ del wlock
def heads(self, start=None):
heads = self.changelog.heads(start)
@@ -1309,12 +1335,9 @@
return subset
def pull(self, remote, heads=None, force=False, lock=None):
- mylock = False
- if not lock:
- lock = self.lock()
- mylock = True
-
try:
+ if not lock:
+ lock = self.lock()
fetch = self.findincoming(remote, force=force)
if fetch == [nullid]:
self.ui.status(_("requesting all changes\n"))
@@ -1331,8 +1354,7 @@
cg = remote.changegroupsubset(fetch, heads, 'pull')
return self.addchangegroup(cg, 'pull', remote.url())
finally:
- if mylock:
- lock.release()
+ del lock
def push(self, remote, force=False, revs=None):
# there are two ways to push to remote repo:
@@ -1405,12 +1427,14 @@
def push_addchangegroup(self, remote, force, revs):
lock = remote.lock()
-
- ret = self.prepush(remote, force, revs)
- if ret[0] is not None:
- cg, remote_heads = ret
- return remote.addchangegroup(cg, 'push', self.url())
- return ret[1]
+ try:
+ ret = self.prepush(remote, force, revs)
+ if ret[0] is not None:
+ cg, remote_heads = ret
+ return remote.addchangegroup(cg, 'push', self.url())
+ return ret[1]
+ finally:
+ del lock
def push_unbundle(self, remote, force, revs):
# local repo finds heads on server, finds out what revs it
@@ -1794,65 +1818,67 @@
changesets = files = revisions = 0
- tr = self.transaction()
-
# write changelog data to temp files so concurrent readers will not see
# inconsistent view
cl = self.changelog
cl.delayupdate()
oldheads = len(cl.heads())
- # pull off the changeset group
- self.ui.status(_("adding changesets\n"))
- cor = cl.count() - 1
- chunkiter = changegroup.chunkiter(source)
- if cl.addgroup(chunkiter, csmap, tr, 1) is None:
- raise util.Abort(_("received changelog group is empty"))
- cnr = cl.count() - 1
- changesets = cnr - cor
+ tr = self.transaction()
+ try:
+ # pull off the changeset group
+ self.ui.status(_("adding changesets\n"))
+ cor = cl.count() - 1
+ chunkiter = changegroup.chunkiter(source)
+ if cl.addgroup(chunkiter, csmap, tr, 1) is None:
+ raise util.Abort(_("received changelog group is empty"))
+ cnr = cl.count() - 1
+ changesets = cnr - cor
- # pull off the manifest group
- self.ui.status(_("adding manifests\n"))
- chunkiter = changegroup.chunkiter(source)
- # no need to check for empty manifest group here:
- # if the result of the merge of 1 and 2 is the same in 3 and 4,
- # no new manifest will be created and the manifest group will
- # be empty during the pull
- self.manifest.addgroup(chunkiter, revmap, tr)
+ # pull off the manifest group
+ self.ui.status(_("adding manifests\n"))
+ chunkiter = changegroup.chunkiter(source)
+ # no need to check for empty manifest group here:
+ # if the result of the merge of 1 and 2 is the same in 3 and 4,
+ # no new manifest will be created and the manifest group will
+ # be empty during the pull
+ self.manifest.addgroup(chunkiter, revmap, tr)
- # process the files
- self.ui.status(_("adding file changes\n"))
- while 1:
- f = changegroup.getchunk(source)
- if not f:
- break
- self.ui.debug(_("adding %s revisions\n") % f)
- fl = self.file(f)
- o = fl.count()
- chunkiter = changegroup.chunkiter(source)
- if fl.addgroup(chunkiter, revmap, tr) is None:
- raise util.Abort(_("received file revlog group is empty"))
- revisions += fl.count() - o
- files += 1
+ # process the files
+ self.ui.status(_("adding file changes\n"))
+ while 1:
+ f = changegroup.getchunk(source)
+ if not f:
+ break
+ self.ui.debug(_("adding %s revisions\n") % f)
+ fl = self.file(f)
+ o = fl.count()
+ chunkiter = changegroup.chunkiter(source)
+ if fl.addgroup(chunkiter, revmap, tr) is None:
+ raise util.Abort(_("received file revlog group is empty"))
+ revisions += fl.count() - o
+ files += 1
- # make changelog see real files again
- cl.finalize(tr)
+
+ # make changelog see real files again
+ cl.finalize(tr)
- newheads = len(self.changelog.heads())
- heads = ""
- if oldheads and newheads != oldheads:
- heads = _(" (%+d heads)") % (newheads - oldheads)
+ newheads = len(self.changelog.heads())
+ heads = ""
+ if oldheads and newheads != oldheads:
+ heads = _(" (%+d heads)") % (newheads - oldheads)
- self.ui.status(_("added %d changesets"
- " with %d changes to %d files%s\n")
- % (changesets, revisions, files, heads))
+ self.ui.status(_("added %d changesets"
+ " with %d changes to %d files%s\n")
+ % (changesets, revisions, files, heads))
- if changesets > 0:
- self.hook('pretxnchangegroup', throw=True,
- node=hex(self.changelog.node(cor+1)), source=srctype,
- url=url)
+ if changesets > 0:
+ self.hook('pretxnchangegroup', throw=True,
+ node=hex(self.changelog.node(cor+1)), source=srctype,
+ url=url)
-
- tr.close()
+ tr.close()
+ finally:
+ del tr
if changesets > 0:
self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
--- a/mercurial/merge.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/merge.py Sat Jul 21 16:02:10 2007 -0500
@@ -506,65 +506,67 @@
wlock = working dir lock, if already held
"""
- if not wlock:
- wlock = repo.wlock()
+ try:
+ if not wlock:
+ wlock = repo.wlock()
- wc = repo.workingctx()
- if node is None:
- # tip of current branch
- try:
- node = repo.branchtags()[wc.branch()]
- except KeyError:
- raise util.Abort(_("branch %s not found") % wc.branch())
- overwrite = force and not branchmerge
- forcemerge = force and branchmerge
- pl = wc.parents()
- p1, p2 = pl[0], repo.changectx(node)
- pa = p1.ancestor(p2)
- fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
- fastforward = False
+ wc = repo.workingctx()
+ if node is None:
+ # tip of current branch
+ try:
+ node = repo.branchtags()[wc.branch()]
+ except KeyError:
+ raise util.Abort(_("branch %s not found") % wc.branch())
+ overwrite = force and not branchmerge
+ forcemerge = force and branchmerge
+ pl = wc.parents()
+ p1, p2 = pl[0], repo.changectx(node)
+ pa = p1.ancestor(p2)
+ fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
+ fastforward = False
- ### check phase
- if not overwrite and len(pl) > 1:
- raise util.Abort(_("outstanding uncommitted merges"))
- if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
- if branchmerge:
- if p1.branch() != p2.branch() and pa != p2:
- fastforward = True
- else:
- raise util.Abort(_("there is nothing to merge, just use "
- "'hg update' or look at 'hg heads'"))
- elif not (overwrite or branchmerge):
- raise util.Abort(_("update spans branches, use 'hg merge' "
- "or 'hg update -C' to lose changes"))
- if branchmerge and not forcemerge:
- if wc.files():
- raise util.Abort(_("outstanding uncommitted changes"))
+ ### check phase
+ if not overwrite and len(pl) > 1:
+ raise util.Abort(_("outstanding uncommitted merges"))
+ if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
+ if branchmerge:
+ if p1.branch() != p2.branch() and pa != p2:
+ fastforward = True
+ else:
+ raise util.Abort(_("there is nothing to merge, just use "
+ "'hg update' or look at 'hg heads'"))
+ elif not (overwrite or branchmerge):
+ raise util.Abort(_("update spans branches, use 'hg merge' "
+ "or 'hg update -C' to lose changes"))
+ if branchmerge and not forcemerge:
+ if wc.files():
+ raise util.Abort(_("outstanding uncommitted changes"))
- ### calculate phase
- action = []
- if not force:
- checkunknown(wc, p2)
- if not util.checkfolding(repo.path):
- checkcollision(p2)
- if not branchmerge:
- action += forgetremoved(wc, p2)
- action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
+ ### calculate phase
+ action = []
+ if not force:
+ checkunknown(wc, p2)
+ if not util.checkfolding(repo.path):
+ checkcollision(p2)
+ if not branchmerge:
+ action += forgetremoved(wc, p2)
+ action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
- ### apply phase
- if not branchmerge: # just jump to the new rev
- fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
- if not partial:
- repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
+ ### apply phase
+ if not branchmerge: # just jump to the new rev
+ fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
+ if not partial:
+ repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
- stats = applyupdates(repo, action, wc, p2)
+ stats = applyupdates(repo, action, wc, p2)
- if not partial:
- recordupdates(repo, action, branchmerge)
- repo.dirstate.setparents(fp1, fp2)
- if not branchmerge and not fastforward:
- repo.dirstate.setbranch(p2.branch())
- repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
+ if not partial:
+ recordupdates(repo, action, branchmerge)
+ repo.dirstate.setparents(fp1, fp2)
+ if not branchmerge and not fastforward:
+ repo.dirstate.setbranch(p2.branch())
+ repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
- return stats
-
+ return stats
+ finally:
+ del wlock
--- a/mercurial/streamclone.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/streamclone.py Sat Jul 21 16:02:10 2007 -0500
@@ -66,22 +66,25 @@
# get consistent snapshot of repo. lock during scan so lock not
# needed while we stream, and commits can happen.
+ repolock = None
try:
- repolock = repo.lock()
- except (lock.LockHeld, lock.LockUnavailable), inst:
- repo.ui.warn('locking the repository failed: %s\n' % (inst,))
- fileobj.write('2\n')
- return
+ try:
+ repolock = repo.lock()
+ except (lock.LockHeld, lock.LockUnavailable), inst:
+ repo.ui.warn('locking the repository failed: %s\n' % (inst,))
+ fileobj.write('2\n')
+ return
- fileobj.write('0\n')
- repo.ui.debug('scanning\n')
- entries = []
- total_bytes = 0
- for name, size in walkrepo(repo.spath):
- name = repo.decodefn(util.pconvert(name))
- entries.append((name, size))
- total_bytes += size
- repolock.release()
+ fileobj.write('0\n')
+ repo.ui.debug('scanning\n')
+ entries = []
+ total_bytes = 0
+ for name, size in walkrepo(repo.spath):
+ name = repo.decodefn(util.pconvert(name))
+ entries.append((name, size))
+ total_bytes += size
+ finally:
+ del repolock
repo.ui.debug('%d files, %d bytes to transfer\n' %
(len(entries), total_bytes))
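
The streamclone change nests the pre-existing try/except inside the new try/finally: the except still reports a failed lock attempt to the client, while the outer finally drops whatever reference exists, which is why the variable is pre-initialised to None before the outer try. A rough sketch of that shape; grab_lock and the stripped-down LockHeld below are placeholders for the real repo.lock()/lock.LockHeld machinery:

class LockHeld(Exception):
    # stripped-down stand-in for mercurial.lock.LockHeld
    pass

def stream_like(grab_lock, fileobj):
    repolock = None                # pre-set so the finally never hits an unbound name
    try:
        try:
            repolock = grab_lock()
        except LockHeld, inst:
            fileobj.write('2\n')   # tell the client the repo could not be locked
            return
        fileobj.write('0\n')       # lock held: scan and stream under it
    finally:
        del repolock               # drops the lock if it was ever acquired

import StringIO
ok = StringIO.StringIO()
stream_like(lambda: object(), ok)          # writes '0\n'
busy = StringIO.StringIO()
def refuse():
    raise LockHeld('lock is busy')
stream_like(refuse, busy)                  # writes '2\n'

Pre-initialising repolock is what keeps the finally safe even when grab_lock raises something the inner except does not catch.
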
--- a/mercurial/verify.py Sat Jul 21 16:02:09 2007 -0500
+++ b/mercurial/verify.py Sat Jul 21 16:02:10 2007 -0500
@@ -10,6 +10,13 @@
import revlog, mdiff
def verify(repo):
+ lock = repo.lock()
+ try:
+ return _verify(repo)
+ finally:
+ del lock
+
+def _verify(repo):
filelinkrevs = {}
filenodes = {}
changesets = revisions = files = 0
@@ -17,8 +24,6 @@
warnings = [0]
neededmanifests = {}
- lock = repo.lock()
-
def err(msg):
repo.ui.warn(msg + "\n")
errors[0] += 1
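
The verify.py hunk is the same conversion done as a thin wrapper: the public verify() owns the lock for the whole run and delegates to _verify(), which assumes the repository is already locked. A sketch of that wrapper shape, with take_lock and _check as purely illustrative names:

def verify(repo, take_lock):
    lock = take_lock(repo)    # held across the entire verification
    try:
        return _check(repo)   # the helper runs with the repo locked
    finally:
        del lock              # dropping the reference releases the lock

def _check(repo):
    errors = 0
    # ... walk the changelog, manifests and filelogs under the caller's lock ...
    return errors

Splitting it this way keeps the locking visible at the entry point and leaves the long helper body free of lock handling, which is why _verify carries no lock code of its own.
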