Mercurial > hg-stable
changeset 16662:ea7bf1d49bce
merge with stable
author      Matt Mackall <mpm@selenic.com>
date        Sat, 12 May 2012 12:23:49 +0200
parents     de4b42daf396 (diff), 2fdd1902ed2d (current diff)
children    a955e05dd7a0
files       mercurial/patch.py
diffstat    43 files changed, 1406 insertions(+), 438 deletions(-)
--- a/contrib/check-code.py Sat May 12 09:43:12 2012 +0200
+++ b/contrib/check-code.py Sat May 12 12:23:49 2012 +0200
@@ -199,6 +199,7 @@
      "always assign an opened file to a variable, and close it afterwards"),
     (r'(?i)descendent', "the proper spelling is descendAnt"),
     (r'\.debug\(\_', "don't mark debug messages for translation"),
+    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
   ],
   # warnings
   [
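The new check-code rule encodes the fact that str.split() with no arguments already discards leading and trailing whitespace, so a preceding strip() is redundant. A minimal stand-alone sketch (not part of check-code.py) of what the pattern flags:

    import re

    # the new rule from the diff above
    rule = re.compile(r'\.strip\(\)\.split\(\)')
    assert rule.search("parts = line.strip().split()")
    # both forms produce the same result, so strip() is unnecessary
    assert '  a b '.strip().split() == '  a b '.split() == ['a', 'b']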
--- a/hgext/largefiles/overrides.py Sat May 12 09:43:12 2012 +0200
+++ b/hgext/largefiles/overrides.py Sat May 12 12:23:49 2012 +0200
@@ -697,6 +697,33 @@
         ui.status(_("%d largefiles cached\n") % numcached)
     return result
 
+def overrideclone(orig, ui, source, dest=None, **opts):
+    result = hg.clone(ui, opts, source, dest,
+                      pull=opts.get('pull'),
+                      stream=opts.get('uncompressed'),
+                      rev=opts.get('rev'),
+                      update=True, # required for successful walkchangerevs
+                      branch=opts.get('branch'))
+    if result is None:
+        return True
+    totalsuccess = 0
+    totalmissing = 0
+    if opts.get('all_largefiles'):
+        sourcerepo, destrepo = result
+        matchfn = scmutil.match(destrepo[None],
+                                [destrepo.wjoin(lfutil.shortname)], {})
+        def prepare(ctx, fns):
+            pass
+        for ctx in cmdutil.walkchangerevs(destrepo, matchfn, {'rev' : None},
+                                          prepare):
+            success, missing = lfcommands.cachelfiles(ui, destrepo, ctx.node())
+            totalsuccess += len(success)
+            totalmissing += len(missing)
+        ui.status(_("%d additional largefiles cached\n") % totalsuccess)
+        if totalmissing > 0:
+            ui.status(_("%d largefiles failed to download\n") % totalmissing)
+    return totalmissing != 0
+
 def overriderebase(orig, ui, repo, **opts):
     repo._isrebasing = True
     try:
@@ -782,6 +809,47 @@
 
     archiver.done()
 
+def hgsubrepoarchive(orig, repo, ui, archiver, prefix):
+    rev = repo._state[1]
+    ctx = repo._repo[rev]
+
+    lfcommands.cachelfiles(ui, repo._repo, ctx.node())
+
+    def write(name, mode, islink, getdata):
+        if lfutil.isstandin(name):
+            return
+        data = getdata()
+
+        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
+
+    for f in ctx:
+        ff = ctx.flags(f)
+        getdata = ctx[f].data
+        if lfutil.isstandin(f):
+            path = lfutil.findfile(repo._repo, getdata().strip())
+            if path is None:
+                raise util.Abort(
+                    _('largefile %s not found in repo store or system cache')
+                    % lfutil.splitstandin(f))
+            f = lfutil.splitstandin(f)
+
+            def getdatafn():
+                fd = None
+                try:
+                    fd = open(os.path.join(prefix, path), 'rb')
+                    return fd.read()
+                finally:
+                    if fd:
+                        fd.close()
+
+            getdata = getdatafn
+
+        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
+
+    for subpath in ctx.substate:
+        sub = ctx.sub(subpath)
+        sub.archive(repo.ui, archiver, prefix)
+
 # If a largefile is modified, the change is not reflected in its
 # standin until a commit. cmdutil.bailifchanged() raises an exception
 # if the repo has uncommitted changes. Wrap it to also check if
--- a/hgext/largefiles/uisetup.py Sat May 12 09:43:12 2012 +0200
+++ b/hgext/largefiles/uisetup.py Sat May 12 12:23:49 2012 +0200
@@ -70,6 +70,12 @@
                                    overrides.overrideupdate)
     entry = extensions.wrapcommand(commands.table, 'pull',
                                    overrides.overridepull)
+    entry = extensions.wrapcommand(commands.table, 'clone',
+                                   overrides.overrideclone)
+    cloneopt = [('', 'all-largefiles', None,
+                 _('download all versions of all largefiles'))]
+
+    entry[1].extend(cloneopt)
     entry = extensions.wrapcommand(commands.table, 'cat',
                                    overrides.overridecat)
     entry = extensions.wrapfunction(merge, '_checkunknownfile',
@@ -100,6 +106,7 @@
 
     extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
     extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
+    extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
     extensions.wrapfunction(cmdutil, 'bailifchanged',
                             overrides.overridebailifchanged)
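For readers unfamiliar with the hook used above: extensions.wrapcommand returns the command-table entry, whose second element is the option list, so an extension can append new flags exactly as the clone wrapper does with --all-largefiles. An illustrative sketch of the pattern (the command 'status' and flag 'my-flag' are hypothetical, not from this changeset):

    from mercurial import commands, extensions

    def myoverride(orig, ui, repo, *args, **opts):
        # delegate to the wrapped command implementation
        return orig(ui, repo, *args, **opts)

    def uisetup(ui):
        entry = extensions.wrapcommand(commands.table, 'status', myoverride)
        # entry[1] is the option list of (short, long, default, help) tuples
        entry[1].append(('', 'my-flag', None, 'hypothetical extra option'))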
--- a/hgext/mq.py Sat May 12 09:43:12 2012 +0200
+++ b/hgext/mq.py Sat May 12 12:23:49 2012 +0200
@@ -46,6 +46,17 @@
 You will by default be managing a patch queue named "patches". You can
 create other, independent patch queues with the :hg:`qqueue` command.
+
+If the working directory contains uncommitted files, qpush, qpop and
+qgoto abort immediately. If -f/--force is used, the changes are
+discarded. Setting::
+
+  [mq]
+  check = True
+
+makes them behave as if -c/--check were passed, and non-conflicting
+local changes will be tolerated and preserved. If incompatible options
+such as -f/--force or --exact are passed, this setting is ignored.
 '''
 
 from mercurial.i18n import _
@@ -280,6 +291,9 @@
         if phase is not None:
             repo.ui.restoreconfig(backup)
 
+class AbortNoCleanup(error.Abort):
+    pass
+
 class queue(object):
     def __init__(self, ui, path, patchdir=None):
         self.basepath = path
@@ -681,7 +695,7 @@
 
     def apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
-              tobackup=None):
+              tobackup=None, check=False):
         wlock = lock = tr = None
         try:
             wlock = repo.wlock()
@@ -690,10 +704,14 @@
             try:
                 ret = self._apply(repo, series, list, update_status,
                                   strict, patchdir, merge,
                                   all_files=all_files,
-                                  tobackup=tobackup)
+                                  tobackup=tobackup, check=check)
                 tr.close()
                 self.savedirty()
                 return ret
+            except AbortNoCleanup:
+                tr.close()
+                self.savedirty()
+                return 2, repo.dirstate.p1()
             except:
                 try:
                     tr.abort()
@@ -708,7 +726,7 @@
 
     def _apply(self, repo, series, list=False, update_status=True,
                strict=False, patchdir=None, merge=None, all_files=None,
-               tobackup=None):
+               tobackup=None, check=False):
         """returns (error, hash)
 
         error = 1 for unable to read, 2 for patch failed, 3 for patch
@@ -749,6 +767,9 @@
             if tobackup:
                 touched = patchmod.changedfiles(self.ui, repo, pf)
                 touched = set(touched) & tobackup
+                if touched and check:
+                    raise AbortNoCleanup(
+                        _("local changes found, refresh first"))
                 self.backup(repo, touched, copy=True)
                 tobackup = tobackup - touched
             (patcherr, files, fuzz) = self.patch(repo, pf)
@@ -862,7 +883,7 @@
     def finish(self, repo, revs):
         # Manually trigger phase computation to ensure phasedefaults is
         # executed before we remove the patches.
-        repo._phaserev
+        repo._phasecache
         patches = self._revpatches(repo, sorted(revs))
         qfinished = self._cleanup(patches, len(patches))
         if qfinished and repo.ui.configbool('mq', 'secret', False):
@@ -959,6 +980,10 @@
         else:
             raise util.Abort(_('patch "%s" already exists') % name)
 
+    def checkforcecheck(self, check, force):
+        if force and check:
+            raise util.Abort(_('cannot use both --force and --check'))
+
     def new(self, repo, patchfn, *pats, **opts):
         """options:
         msg: a string or a no-argument function returning a string
@@ -1156,8 +1181,9 @@
                 return self.series[i + off]
         raise util.Abort(_("patch %s not in series") % patch)
 
-    def push(self, repo, patch=None, force=False, list=False,
-             mergeq=None, all=False, move=False, exact=False, nobackup=False):
+    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
+             all=False, move=False, exact=False, nobackup=False, check=False):
+        self.checkforcecheck(check, force)
         diffopts = self.diffopts()
         wlock = repo.wlock()
         try:
@@ -1212,10 +1238,13 @@
             if start == len(self.series):
                 self.ui.warn(_('patch series already fully applied\n'))
                 return 1
-            if not force:
+            if not force and not check:
                 self.checklocalchanges(repo, refresh=self.applied)
 
             if exact:
+                if check:
+                    raise util.Abort(
+                        _("cannot use --exact and --check together"))
                 if move:
                     raise util.Abort(_("cannot use --exact and --move together"))
                 if self.applied:
@@ -1257,9 +1286,12 @@
                 end = self.series.index(patch, start) + 1
 
             tobackup = set()
-            if not nobackup and force:
+            if (not nobackup and force) or check:
                 m, a, r, d = self.checklocalchanges(repo, force=True)
-                tobackup.update(m + a)
+                if check:
+                    tobackup.update(m + a + r + d)
+                else:
+                    tobackup.update(m + a)
 
             s = self.series[start:end]
             all_files = set()
@@ -1268,7 +1300,7 @@
                     ret = self.mergepatch(repo, mergeq, s, diffopts)
                 else:
                     ret = self.apply(repo, s, list, all_files=all_files,
-                                     tobackup=tobackup)
+                                     tobackup=tobackup, check=check)
             except:
                 self.ui.warn(_('cleaning up working directory...'))
                 node = repo.dirstate.p1()
@@ -1299,7 +1331,8 @@
             wlock.release()
 
     def pop(self, repo, patch=None, force=False, update=True, all=False,
-            nobackup=False):
+            nobackup=False, check=False):
+        self.checkforcecheck(check, force)
         wlock = repo.wlock()
         try:
             if patch:
@@ -1346,9 +1379,12 @@
 
             tobackup = set()
             if update:
-                m, a, r, d = self.checklocalchanges(repo, force=force)
-                if not nobackup and force:
-                    tobackup.update(m + a)
+                m, a, r, d = self.checklocalchanges(repo, force=force or check)
+                if force:
+                    if not nobackup:
+                        tobackup.update(m + a)
+                elif check:
+                    tobackup.update(m + a + r + d)
 
             self.applieddirty = True
             end = len(self.applied)
@@ -1379,8 +1415,10 @@
                 if d:
                     raise util.Abort(_("deletions found between repo revs"))
 
-                # backup local changes in --force case
-                self.backup(repo, set(a + m + r) & tobackup)
+                tobackup = set(a + m + r) & tobackup
+                if check and tobackup:
+                    self.localchangesfound()
+                self.backup(repo, tobackup)
 
                 for f in a:
                     try:
@@ -1959,6 +1997,14 @@
     self.removeundo(repo)
     return imported
 
+def fixcheckopts(ui, opts):
+    if (not ui.configbool('mq', 'check') or opts.get('force')
+        or opts.get('exact')):
+        return opts
+    opts = dict(opts)
+    opts['check'] = True
+    return opts
+
 @command("qdelete|qremove|qrm",
          [('k', 'keep', None, _('keep patch file')),
           ('r', 'rev', [],
@@ -2498,20 +2544,25 @@
         wlock.release()
 
 @command("qgoto",
-         [('f', 'force', None, _('overwrite any local changes')),
+         [('c', 'check', None, _('tolerate non-conflicting local changes')),
+          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
 def goto(ui, repo, patch, **opts):
     '''push or pop patches until named patch is at top of stack
 
     Returns 0 on success.'''
+    opts = fixcheckopts(ui, opts)
     q = repo.mq
     patch = q.lookup(patch)
     nobackup = opts.get('no_backup')
+    check = opts.get('check')
     if q.isapplied(patch):
-        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup)
+        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
+                    check=check)
     else:
-        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup)
+        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
+                     check=check)
     q.savedirty()
     return ret
 
@@ -2631,7 +2682,8 @@
     return newpath
 
 @command("^qpush",
-         [('f', 'force', None, _('apply on top of local changes')),
+         [('c', 'check', None, _('tolerate non-conflicting local changes')),
+          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None, _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
@@ -2645,14 +2697,17 @@
 def push(ui, repo, patch=None, **opts):
     """push the next patch onto the stack
 
-    When -f/--force is applied, all local changes in patched files
-    will be lost.
+    By default, abort if the working directory contains uncommitted
+    changes. With -c/--check, abort only if the uncommitted files
+    overlap with patched files. With -f/--force, backup and patch over
+    uncommitted changes.
 
     Return 0 on success.
     """
     q = repo.mq
     mergeq = None
 
+    opts = fixcheckopts(ui, opts)
     if opts.get('merge'):
         if opts.get('name'):
             newpath = repo.join(opts.get('name'))
@@ -2665,25 +2720,33 @@
         ui.warn(_("merging with queue at: %s\n") % mergeq.path)
     ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                  mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
-                 exact=opts.get('exact'), nobackup=opts.get('no_backup'))
+                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
+                 check=opts.get('check'))
     return ret
 
 @command("^qpop",
          [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '', _('queue name to pop (DEPRECATED)'), _('NAME')),
+         ('c', 'check', None, _('tolerate non-conflicting local changes')),
         ('f', 'force', None, _('forget any local changes to patched files')),
         ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
 def pop(ui, repo, patch=None, **opts):
     """pop the current patch off the stack
 
-    By default, pops off the top of the patch stack. If given a patch
-    name, keeps popping off patches until the named patch is at the
-    top of the stack.
+    Without argument, pops off the top of the patch stack. If given a
+    patch name, keeps popping off patches until the named patch is at
+    the top of the stack.
+
+    By default, abort if the working directory contains uncommitted
+    changes. With -c/--check, abort only if the uncommitted files
+    overlap with patched files. With -f/--force, backup and discard
+    changes made to such files.
 
     Return 0 on success.
     """
+    opts = fixcheckopts(ui, opts)
     localupdate = True
     if opts.get('name'):
         q = queue(ui, repo.path, repo.join(opts.get('name')))
@@ -2692,7 +2755,8 @@
     else:
         q = repo.mq
     ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
-                all=opts.get('all'), nobackup=opts.get('no_backup'))
+                all=opts.get('all'), nobackup=opts.get('no_backup'),
+                check=opts.get('check'))
     q.savedirty()
     return ret
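The option precedence implemented by fixcheckopts and checkforcecheck above is easy to miss: the [mq] check configuration turns -c/--check on by default, an explicit -f/--force or --exact suppresses that default, and passing --force and --check together is an error. A small stand-alone sketch of that decision table (stub logic for illustration, not mq itself):

    def effective_check(configcheck, force=False, exact=False, check=False):
        # --force and --check conflict outright
        if force and check:
            raise ValueError('cannot use both --force and --check')
        # config only fills in --check when no incompatible option is given
        if configcheck and not (force or exact):
            check = True
        return check

    assert effective_check(True) is True
    assert effective_check(True, force=True) is False
    assert effective_check(False) is False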
--- a/hgext/rebase.py Sat May 12 09:43:12 2012 +0200
+++ b/hgext/rebase.py Sat May 12 12:23:49 2012 +0200
@@ -182,7 +182,7 @@
                 branch = repo[None].branch()
                 dest = repo[branch]
             else:
-                dest = repo[destf]
+                dest = scmutil.revsingle(repo, destf)
 
             if revf:
                 rebaseset = repo.revs('%lr', revf)
@@ -201,7 +201,7 @@
                 root = None
 
             if not rebaseset:
-                repo.ui.debug('base is ancestor of destination')
+                repo.ui.debug('base is ancestor of destination\n')
                 result = None
             elif not keepf and list(repo.revs('first(children(%ld) - %ld)',
                                               rebaseset, rebaseset)):
@@ -618,7 +618,7 @@
     if commonbase == dest:
         samebranch = root.branch() == dest.branch()
         if samebranch and root in dest.children():
-            repo.ui.debug('source is a child of destination')
+            repo.ui.debug('source is a child of destination\n')
             return None
         # rebase on ancestor, force detach
         detach = True
--- a/hgext/transplant.py Sat May 12 09:43:12 2012 +0200
+++ b/hgext/transplant.py Sat May 12 12:23:49 2012 +0200
@@ -124,7 +124,7 @@
                     continue
 
                 parents = source.changelog.parents(node)
-                if not opts.get('filter'):
+                if not (opts.get('filter') or opts.get('log')):
                     # If the changeset parent is the same as the
                     # wdir's parent, just pull it.
                     if parents[0] == p1:
--- a/mercurial/cmdutil.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/cmdutil.py Sat May 12 12:23:49 2012 +0200
@@ -1486,7 +1486,7 @@
     def badfn(path, msg):
         if path in names:
             return
-        if path in repo[node].substate:
+        if path in ctx.substate:
             return
         path_ = path + '/'
         for f in names:
@@ -1494,14 +1494,14 @@
                 return
         ui.warn("%s: %s\n" % (m.rel(path), msg))
 
-    m = scmutil.match(repo[node], pats, opts)
+    m = scmutil.match(ctx, pats, opts)
     m.bad = badfn
-    for abs in repo[node].walk(m):
+    for abs in ctx.walk(m):
         if abs not in names:
             names[abs] = m.rel(abs), m.exact(abs)
 
     # get the list of subrepos that must be reverted
-    targetsubs = [s for s in repo[node].substate if m(s)]
+    targetsubs = [s for s in ctx.substate if m(s)]
     m = scmutil.matchfiles(repo, names)
     changes = repo.status(match=m)[:4]
     modified, added, removed, deleted = map(set, changes)
--- a/mercurial/commands.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/commands.py Sat May 12 12:23:49 2012 +0200
@@ -520,10 +520,12 @@
     revision as good or bad without checking it out first.
 
     If you supply a command, it will be used for automatic bisection.
-    Its exit status will be used to mark revisions as good or bad:
-    status 0 means good, 125 means to skip the revision, 127
-    (command not found) will abort the bisection, and any other
-    non-zero exit status means the revision is bad.
+    The environment variable HG_NODE will contain the ID of the
+    changeset being tested. The exit status of the command will be
+    used to mark revisions as good or bad: status 0 means good, 125
+    means to skip the revision, 127 (command not found) will abort the
+    bisection, and any other non-zero exit status means the revision
+    is bad.
 
     .. container:: verbose
 
@@ -563,6 +565,11 @@
 
           hg log -r "bisect(pruned)"
 
+      - see the changeset currently being bisected (especially useful
+        if running with -U/--noupdate)::
+
+          hg log -r "bisect(current)"
+
       - see all changesets that took part in the current bisection::
 
           hg log -r "bisect(range)"
@@ -647,10 +654,22 @@
     if command:
         changesets = 1
         try:
+            node = state['current'][0]
+        except LookupError:
+            if noupdate:
+                raise util.Abort(_('current bisect revision is unknown - '
+                                   'start a new bisect to fix'))
+            node, p2 = repo.dirstate.parents()
+            if p2 != nullid:
+                raise util.Abort(_('current bisect revision is a merge'))
+        try:
             while changesets:
                 # update state
+                state['current'] = [node]
                 hbisect.save_state(repo, state)
-                status = util.system(command, out=ui.fout)
+                status = util.system(command,
+                                     environ={'HG_NODE': hex(node)},
+                                     out=ui.fout)
                 if status == 125:
                     transition = "skip"
                 elif status == 0:
@@ -662,7 +681,7 @@
                     raise util.Abort(_("%s killed") % command)
                 else:
                     transition = "bad"
-                ctx = scmutil.revsingle(repo, rev)
+                ctx = scmutil.revsingle(repo, rev, node)
                 rev = None # clear for future iterations
                 state[transition].append(ctx.node())
                 ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
@@ -670,9 +689,12 @@
                 # bisect
                 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                 # update to next check
-                cmdutil.bailifchanged(repo)
-                hg.clean(repo, nodes[0], show_stats=False)
+                node = nodes[0]
+                if not noupdate:
+                    cmdutil.bailifchanged(repo)
+                    hg.clean(repo, node, show_stats=False)
         finally:
+            state['current'] = [node]
             hbisect.save_state(repo, state)
         print_result(nodes, good)
         return
@@ -704,6 +726,8 @@
         if extendnode is not None:
             ui.write(_("Extending search to changeset %d:%s\n"
                      % (extendnode.rev(), extendnode)))
+            state['current'] = [extendnode.node()]
+            hbisect.save_state(repo, state)
             if noupdate:
                 return
             cmdutil.bailifchanged(repo)
@@ -723,6 +747,8 @@
         ui.write(_("Testing changeset %d:%s "
                    "(%d changesets remaining, ~%d tests)\n")
                  % (rev, short(node), changesets, tests))
+        state['current'] = [node]
+        hbisect.save_state(repo, state)
         if not noupdate:
             cmdutil.bailifchanged(repo)
             return hg.clean(repo, node)
@@ -921,26 +947,26 @@
 
     for isactive, node, tag in branches:
         if (not active) or isactive:
+            hn = repo.lookup(node)
+            if isactive:
+                label = 'branches.active'
+                notice = ''
+            elif hn not in repo.branchheads(tag, closed=False):
+                if not closed:
+                    continue
+                label = 'branches.closed'
+                notice = _(' (closed)')
+            else:
+                label = 'branches.inactive'
+                notice = _(' (inactive)')
+            if tag == repo.dirstate.branch():
+                label = 'branches.current'
+            rev = str(node).rjust(31 - encoding.colwidth(tag))
+            rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset')
+            tag = ui.label(tag, label)
             if ui.quiet:
                 ui.write("%s\n" % tag)
             else:
-                hn = repo.lookup(node)
-                if isactive:
-                    label = 'branches.active'
-                    notice = ''
-                elif hn not in repo.branchheads(tag, closed=False):
-                    if not closed:
-                        continue
-                    label = 'branches.closed'
-                    notice = _(' (closed)')
-                else:
-                    label = 'branches.inactive'
-                    notice = _(' (inactive)')
-                if tag == repo.dirstate.branch():
-                    label = 'branches.current'
-                rev = str(node).rjust(31 - encoding.colwidth(tag))
-                rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset')
-                tag = ui.label(tag, label)
                 ui.write("%s %s%s\n" % (tag, rev, notice))
 
 @command('bundle',
@@ -2555,6 +2581,7 @@
     'graft',
     [('c', 'continue', False, _('resume interrupted graft')),
      ('e', 'edit', False, _('invoke editor on commit messages')),
+     ('', 'log', None, _('append graft info to log message')),
      ('D', 'currentdate', False,
       _('record the current date as commit date')),
      ('U', 'currentuser', False,
@@ -2573,6 +2600,11 @@
     Changesets that are ancestors of the current revision, that have
     already been grafted, or that are merges will be skipped.
 
+    If --log is specified, log messages will have a comment appended
+    of the form::
+
+      (grafted from CHANGESETHASH)
+
     If a graft merge results in conflicts, the graft process is
     interrupted so that the current merge can be manually resolved.
     Once all conflicts are addressed, the graft process can be
@@ -2722,8 +2754,13 @@
             date = ctx.date()
             if opts.get('date'):
                 date = opts['date']
-            repo.commit(text=ctx.description(), user=user,
+            message = ctx.description()
+            if opts.get('log'):
+                message += '\n(grafted from %s)' % ctx.hex()
+            node = repo.commit(text=message, user=user,
                         date=date, extra=extra, editor=editor)
+            if node is None:
+                ui.status(_('graft for revision %s is empty\n') % ctx.rev())
     finally:
         wlock.release()
 
@@ -4347,10 +4384,10 @@
     lock = repo.lock()
     try:
         # set phase
-        nodes = [ctx.node() for ctx in repo.set('%ld', revs)]
-        if not nodes:
-            raise util.Abort(_('empty revision set'))
-        olddata = repo._phaserev[:]
+        if not revs:
+            raise util.Abort(_('empty revision set'))
+        nodes = [repo[r].node() for r in revs]
+        olddata = repo._phasecache.getphaserevs(repo)[:]
         phases.advanceboundary(repo, targetphase, nodes)
         if opts['force']:
             phases.retractboundary(repo, targetphase, nodes)
@@ -4358,7 +4395,7 @@
         lock.release()
     if olddata is not None:
         changes = 0
-        newdata = repo._phaserev
+        newdata = repo._phasecache.getphaserevs(repo)
        changes = sum(o != newdata[i] for i, o in enumerate(olddata))
        rejected = [n for n in nodes if newdata[repo[n].rev()] < targetphase]
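Since the command run by 'hg bisect --command' now receives the changeset being tested in HG_NODE, a test script no longer has to ask the working copy which revision it is on, which is what makes -U/--noupdate bisection workable. A minimal sketch of such a script (a hypothetical check.py, invoked as: hg bisect --command 'python check.py'):

    import os
    import sys

    # the 40-character hex node of the changeset under test
    node = os.environ['HG_NODE']
    sys.stderr.write('testing changeset %s\n' % node)
    # exit 0 = good, 125 = skip, 127 is reserved (command not found),
    # any other non-zero status = bad
    sys.exit(0)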
--- a/mercurial/context.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/context.py Sat May 12 12:23:49 2012 +0200
@@ -8,6 +8,7 @@
 from node import nullid, nullrev, short, hex, bin
 from i18n import _
 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
+import copies
 import match as matchmod
 import os, errno, stat
 
@@ -190,12 +191,7 @@
     def bookmarks(self):
         return self._repo.nodebookmarks(self._node)
     def phase(self):
-        if self._rev == -1:
-            return phases.public
-        if self._rev >= len(self._repo._phaserev):
-            # outdated cache
-            del self._repo._phaserev
-        return self._repo._phaserev[self._rev]
+        return self._repo._phasecache.phase(self._repo, self._rev)
     def phasestr(self):
         return phases.phasenames[self.phase()]
     def mutable(self):
@@ -634,27 +630,27 @@
 
         return zip(hist[base][0], hist[base][1].splitlines(True))
 
-    def ancestor(self, fc2, actx=None):
+    def ancestor(self, fc2, actx):
         """
         find the common ancestor file context, if any, of self, and fc2
 
-        If actx is given, it must be the changectx of the common ancestor
+        actx must be the changectx of the common ancestor
         of self's and fc2's respective changesets.
         """
 
-        if actx is None:
-            actx = self.changectx().ancestor(fc2.changectx())
-
-        # the trivial case: changesets are unrelated, files must be too
-        if not actx:
-            return None
-
         # the easy case: no (relevant) renames
         if fc2.path() == self.path() and self.path() in actx:
             return actx[self.path()]
-        acache = {}
+
+        # the next easiest cases: unambiguous predecessor (name trumps
+        # history)
+        if self.path() in actx and fc2.path() not in actx:
+            return actx[self.path()]
+        if fc2.path() in actx and self.path() not in actx:
+            return actx[fc2.path()]
 
         # prime the ancestor cache for the working directory
+        acache = {}
         for c in (self, fc2):
             if c._filerev is None:
                 pl = [(n.path(), n.filenode()) for n in c.parents()]
@@ -695,6 +691,14 @@
             c = visit.pop(max(visit))
             yield c
 
+    def copies(self, c2):
+        if not util.safehasattr(self, "_copycache"):
+            self._copycache = {}
+        sc2 = str(c2)
+        if sc2 not in self._copycache:
+            self._copycache[sc2] = copies.pathcopies(c2)
+        return self._copycache[sc2]
+
 class workingctx(changectx):
     """A workingctx object makes access to data related to the current
     working directory convenient.
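The new changectx.copies method is a per-context memo: computing path copies is expensive, so the result is cached keyed on the string form of the other context. An illustrative reduction of the pattern (hypothetical names, not Mercurial API):

    class Memo(object):
        def copies(self, other, compute):
            # lazily create the cache dict on first use
            if not hasattr(self, '_copycache'):
                self._copycache = {}
            key = str(other)
            if key not in self._copycache:
                self._copycache[key] = compute(other)  # expensive call
            return self._copycache[key]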
--- a/mercurial/discovery.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/discovery.py Sat May 12 12:23:49 2012 +0200
@@ -105,7 +105,7 @@
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
-    if not repo._phaseroots[phases.secret]:
+    if not repo._phasecache.phaseroots[phases.secret]:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
--- a/mercurial/hbisect.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/hbisect.py Sat May 12 12:23:49 2012 +0200
@@ -132,7 +132,7 @@
 
 def load_state(repo):
-    state = {'good': [], 'bad': [], 'skip': []}
+    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
     if os.path.exists(repo.join("bisect.state")):
         for l in repo.opener("bisect.state"):
             kind, node = l[:-1].split()
@@ -164,10 +164,11 @@
     - ``pruned``   : csets that are goods, bads or skipped
     - ``untested`` : csets whose fate is yet unknown
     - ``ignored``  : csets ignored due to DAG topology
+    - ``current``  : the cset currently being bisected
     """
     state = load_state(repo)
-    if status in ('good', 'bad', 'skip'):
-        return [repo.changelog.rev(n) for n in state[status]]
+    if status in ('good', 'bad', 'skip', 'current'):
+        return map(repo.changelog.rev, state[status])
     else:
         # In the following sets, we do *not* call 'bisect()' with more
         # than one level of recursion, because that can be very, very
@@ -233,7 +234,7 @@
     if rev in get(repo, 'skip'):
         # i18n: bisect changeset status
         return _('skipped')
-    if rev in get(repo, 'untested'):
+    if rev in get(repo, 'untested') or rev in get(repo, 'current'):
         # i18n: bisect changeset status
         return _('untested')
     if rev in get(repo, 'ignored'):
--- a/mercurial/httpclient/__init__.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/httpclient/__init__.py Sat May 12 12:23:49 2012 +0200
@@ -45,6 +45,7 @@
 import select
 import socket
 
+import _readers
 import socketutil
 
 logger = logging.getLogger(__name__)
@@ -54,8 +55,6 @@
 HTTP_VER_1_0 = 'HTTP/1.0'
 HTTP_VER_1_1 = 'HTTP/1.1'
 
-_LEN_CLOSE_IS_END = -1
-
 OUTGOING_BUFFER_SIZE = 1 << 15
 INCOMING_BUFFER_SIZE = 1 << 20
 
@@ -83,23 +82,19 @@
     The response will continue to load as available. If you need the
     complete response before continuing, check the .complete() method.
     """
-    def __init__(self, sock, timeout):
+    def __init__(self, sock, timeout, method):
         self.sock = sock
+        self.method = method
         self.raw_response = ''
-        self._body = None
         self._headers_len = 0
-        self._content_len = 0
         self.headers = None
         self.will_close = False
        self.status_line = ''
         self.status = None
+        self.continued = False
         self.http_version = None
         self.reason = None
-        self._chunked = False
-        self._chunked_done = False
-        self._chunked_until_next = 0
-        self._chunked_skip_bytes = 0
-        self._chunked_preloaded_block = None
+        self._reader = None
 
         self._read_location = 0
         self._eol = EOL
@@ -117,11 +112,12 @@
         socket is closed, this will nearly always return False, even
         in cases where all the data has actually been loaded.
         """
-        if self._chunked:
-            return self._chunked_done
-        if self._content_len == _LEN_CLOSE_IS_END:
-            return False
-        return self._body is not None and len(self._body) >= self._content_len
+        if self._reader:
+            return self._reader.done()
+
+    def _close(self):
+        if self._reader is not None:
+            self._reader._close()
 
     def readline(self):
         """Read a single line from the response body.
 
         This may block until either a line ending is found or the
         response is complete.
         """
-        eol = self._body.find('\n', self._read_location)
-        while eol == -1 and not self.complete():
+        # TODO: move this into the reader interface where it can be
+        # smarter (and probably avoid copies)
+        bytes = []
+        while not bytes:
+            try:
+                bytes = [self._reader.read(1)]
+            except _readers.ReadNotReady:
+                self._select()
+        while bytes[-1] != '\n' and not self.complete():
             self._select()
-            eol = self._body.find('\n', self._read_location)
-        if eol != -1:
-            eol += 1
-        else:
-            eol = len(self._body)
-        data = self._body[self._read_location:eol]
-        self._read_location = eol
-        return data
+            bytes.append(self._reader.read(1))
+        if bytes[-1] != '\n':
+            next = self._reader.read(1)
+            while next and next != '\n':
+                bytes.append(next)
+                next = self._reader.read(1)
+            bytes.append(next)
+        return ''.join(bytes)
 
     def read(self, length=None):
         # if length is None, unbounded read
         while (not self.complete()  # never select on a finished read
                and (not length  # unbounded, so we wait for complete()
-                    or (self._read_location + length) > len(self._body))):
+                    or length > self._reader.available_data)):
             self._select()
         if not length:
-            length = len(self._body) - self._read_location
-        elif len(self._body) < (self._read_location + length):
-            length = len(self._body) - self._read_location
-        r = self._body[self._read_location:self._read_location + length]
-        self._read_location += len(r)
+            length = self._reader.available_data
+        r = self._reader.read(length)
         if self.complete() and self.will_close:
             self.sock.close()
         return r
@@ -160,93 +160,35 @@
     def _select(self):
         r, _, _ = select.select([self.sock], [], [], self._timeout)
         if not r:
-            # socket was not readable. If the response is not complete
-            # and we're not a _LEN_CLOSE_IS_END response, raise a timeout.
-            # If we are a _LEN_CLOSE_IS_END response and we have no data,
-            # raise a timeout.
-            if not (self.complete() or
-                    (self._content_len == _LEN_CLOSE_IS_END and self._body)):
+            # socket was not readable. If the response is not
+            # complete, raise a timeout.
+            if not self.complete():
                 logger.info('timed out with timeout of %s', self._timeout)
                 raise HTTPTimeoutException('timeout reading data')
-        logger.info('cl: %r body: %r', self._content_len, self._body)
         try:
             data = self.sock.recv(INCOMING_BUFFER_SIZE)
-            # If the socket was readable and no data was read, that
-            # means the socket was closed. If this isn't a
-            # _CLOSE_IS_END socket, then something is wrong if we're
-            # here (we shouldn't enter _select() if the response is
-            # complete), so abort.
-            if not data and self._content_len != _LEN_CLOSE_IS_END:
-                raise HTTPRemoteClosedError(
-                    'server appears to have closed the socket mid-response')
         except socket.sslerror, e:
             if e.args[0] != socket.SSL_ERROR_WANT_READ:
                 raise
             logger.debug('SSL_WANT_READ in _select, should retry later')
             return True
         logger.debug('response read %d data during _select', len(data))
+        # If the socket was readable and no data was read, that means
+        # the socket was closed. Inform the reader (if any) so it can
+        # raise an exception if this is an invalid situation.
         if not data:
-            if self.headers and self._content_len == _LEN_CLOSE_IS_END:
-                self._content_len = len(self._body)
+            if self._reader:
+                self._reader._close()
             return False
         else:
             self._load_response(data)
             return True
 
-    def _chunked_parsedata(self, data):
-        if self._chunked_preloaded_block:
-            data = self._chunked_preloaded_block + data
-            self._chunked_preloaded_block = None
-        while data:
-            logger.debug('looping with %d data remaining', len(data))
-            # Slice out anything we should skip
-            if self._chunked_skip_bytes:
-                if len(data) <= self._chunked_skip_bytes:
-                    self._chunked_skip_bytes -= len(data)
-                    data = ''
-                    break
-                else:
-                    data = data[self._chunked_skip_bytes:]
-                    self._chunked_skip_bytes = 0
-
-            # determine how much is until the next chunk
-            if self._chunked_until_next:
-                amt = self._chunked_until_next
-                logger.debug('reading remaining %d of existing chunk', amt)
-                self._chunked_until_next = 0
-                body = data
-            else:
-                try:
-                    amt, body = data.split(self._eol, 1)
-                except ValueError:
-                    self._chunked_preloaded_block = data
-                    logger.debug('saving %r as a preloaded block for chunked',
-                                 self._chunked_preloaded_block)
-                    return
-                amt = int(amt, base=16)
-                logger.debug('reading chunk of length %d', amt)
-                if amt == 0:
-                    self._chunked_done = True
-
-            # read through end of what we have or the chunk
-            self._body += body[:amt]
-            if len(body) >= amt:
-                data = body[amt:]
-                self._chunked_skip_bytes = len(self._eol)
-            else:
-                self._chunked_until_next = amt - len(body)
-                self._chunked_skip_bytes = 0
-                data = ''
-
     def _load_response(self, data):
-        if self._chunked:
-            self._chunked_parsedata(data)
-            return
-        elif self._body is not None:
-            self._body += data
-            return
-
-        # We haven't seen end of headers yet
+        # Being here implies we're not at the end of the headers yet,
+        # since at the end of this method if headers were completely
+        # loaded we replace this method with the load() method of the
+        # reader we created.
         self.raw_response += data
         # This is a bogus server with bad line endings
         if self._eol not in self.raw_response:
@@ -270,6 +212,7 @@
         http_ver, status = hdrs.split(' ', 1)
         if status.startswith('100'):
             self.raw_response = body
+            self.continued = True
             logger.debug('continue seen, setting body to %r', body)
             return
 
@@ -289,23 +232,46 @@
         if self._eol != EOL:
             hdrs = hdrs.replace(self._eol, '\r\n')
         headers = rfc822.Message(cStringIO.StringIO(hdrs))
+        content_len = None
         if HDR_CONTENT_LENGTH in headers:
-            self._content_len = int(headers[HDR_CONTENT_LENGTH])
+            content_len = int(headers[HDR_CONTENT_LENGTH])
         if self.http_version == HTTP_VER_1_0:
             self.will_close = True
         elif HDR_CONNECTION_CTRL in headers:
             self.will_close = (
                 headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE)
-            if self._content_len == 0:
-                self._content_len = _LEN_CLOSE_IS_END
         if (HDR_XFER_ENCODING in headers
             and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED):
-            self._body = ''
-            self._chunked_parsedata(body)
-            self._chunked = True
-        if self._body is None:
-            self._body = body
+            self._reader = _readers.ChunkedReader(self._eol)
+            logger.debug('using a chunked reader')
+        else:
+            # HEAD responses are forbidden from returning a body, and
+            # it's implausible for a CONNECT response to use
+            # close-is-end logic for an OK response.
+            if (self.method == 'HEAD' or
+                (self.method == 'CONNECT' and content_len is None)):
+                content_len = 0
+            if content_len is not None:
+                logger.debug('using a content-length reader with length %d',
+                             content_len)
+                self._reader = _readers.ContentLengthReader(content_len)
+            else:
+                # Response body had no length specified and is not
+                # chunked, so the end of the body will only be
+                # identifiable by the termination of the socket by the
+                # server. My interpretation of the spec means that we
+                # are correct in hitting this case if
+                # transfer-encoding, content-length, and
+                # connection-control were left unspecified.
+                self._reader = _readers.CloseIsEndReader()
+                logger.debug('using a close-is-end reader')
+                self.will_close = True
+
+        if body:
+            self._reader._load(body)
+        logger.debug('headers complete')
         self.headers = headers
+        self._load_response = self._reader._load
 
 
 class HTTPConnection(object):
@@ -382,13 +348,14 @@
                           {}, HTTP_VER_1_0)
             sock.send(data)
             sock.setblocking(0)
-            r = self.response_class(sock, self.timeout)
+            r = self.response_class(sock, self.timeout, 'CONNECT')
             timeout_exc = HTTPTimeoutException(
                 'Timed out waiting for CONNECT response from proxy')
             while not r.complete():
                 try:
                     if not r._select():
-                        raise timeout_exc
+                        if not r.complete():
+                            raise timeout_exc
                 except HTTPTimeoutException:
                     # This raise/except pattern looks goofy, but
                     # _select can raise the timeout as well as the
@@ -527,7 +494,7 @@
         out = outgoing_headers or body
         blocking_on_continue = False
         if expect_continue and not outgoing_headers and not (
-            response and response.headers):
+            response and (response.headers or response.continued)):
             logger.info(
                 'waiting up to %s seconds for'
                 ' continue response from server',
@@ -550,11 +517,6 @@
                             'server, optimistically sending request body')
                 else:
                     raise HTTPTimeoutException('timeout sending data')
-            # TODO exceptional conditions with select? (what are those be?)
-            # TODO if the response is loading, must we finish sending at all?
-            #
-            # Certainly not if it's going to close the connection and/or
-            # the response is already done...I think.
         was_first = first
 
         # incoming data
@@ -572,11 +534,11 @@
                         logger.info('socket appears closed in read')
                         self.sock = None
                         self._current_response = None
+                        if response is not None:
+                            response._close()
                         # This if/elif ladder is a bit subtle,
                         # comments in each branch should help.
-                        if response is not None and (
-                            response.complete() or
-                            response._content_len == _LEN_CLOSE_IS_END):
+                        if response is not None and response.complete():
                             # Server responded completely and then
                             # closed the socket. We should just shut
                             # things down and let the caller get their
@@ -605,7 +567,7 @@
                                 'response was missing or incomplete!')
                     logger.debug('read %d bytes in request()', len(data))
                     if response is None:
-                        response = self.response_class(r[0], self.timeout)
+                        response = self.response_class(r[0], self.timeout,
+                                                       method)
                     response._load_response(data)
                     # Jump to the next select() call so we load more
                     # data if the server is still sending us content.
                     continue
                 except socket.error, e:
                     if e[0] != errno.EPIPE and not was_first:
                         raise
-                    if (response._content_len
-                        and response._content_len != _LEN_CLOSE_IS_END):
-                        outgoing_headers = sent_data + outgoing_headers
-                    reconnect('read')
 
             # outgoing data
             if w and out:
@@ -661,7 +619,7 @@
         # close if the server response said to or responded before eating
        # the whole request
         if response is None:
-            response = self.response_class(self.sock, self.timeout)
+            response = self.response_class(self.sock, self.timeout, method)
         complete = response.complete()
         data_left = bool(outgoing_headers or body)
         if data_left:
@@ -679,7 +637,8 @@
             raise httplib.ResponseNotReady()
         r = self._current_response
         while r.headers is None:
-            r._select()
+            if not r._select() and not r.complete():
+                raise _readers.HTTPRemoteClosedError()
         if r.will_close:
             self.sock = None
             self._current_response = None
@@ -705,7 +664,7 @@
 class HTTPStateError(httplib.HTTPException):
     """Invalid internal state encountered."""
 
-
-class HTTPRemoteClosedError(httplib.HTTPException):
-    """The server closed the remote socket in the middle of a response."""
+# Forward this exception type from _readers since it needs to be part
+# of the public API.
+HTTPRemoteClosedError = _readers.HTTPRemoteClosedError
 # no-check-code
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/httpclient/_readers.py Sat May 12 12:23:49 2012 +0200
@@ -0,0 +1,195 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Reader objects to abstract out different body response types.
+
+This module is package-private. It is not expected that these will
+have any clients outside of httpplus.
+"""
+
+import httplib
+import itertools
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ReadNotReady(Exception):
+    """Raised when read() is attempted but not enough data is loaded."""
+
+
+class HTTPRemoteClosedError(httplib.HTTPException):
+    """The server closed the remote socket in the middle of a response."""
+
+
+class AbstractReader(object):
+    """Abstract base class for response readers.
+
+    Subclasses must implement _load, and should implement _close if
+    it's not an error for the server to close their socket without
+    some termination condition being detected during _load.
+    """
+    def __init__(self):
+        self._finished = False
+        self._done_chunks = []
+
+    @property
+    def available_data(self):
+        return sum(map(len, self._done_chunks))
+
+    def done(self):
+        return self._finished
+
+    def read(self, amt):
+        if self.available_data < amt and not self._finished:
+            raise ReadNotReady()
+        need = [amt]
+        def pred(s):
+            needed = need[0] > 0
+            need[0] -= len(s)
+            return needed
+        blocks = list(itertools.takewhile(pred, self._done_chunks))
+        self._done_chunks = self._done_chunks[len(blocks):]
+        over_read = sum(map(len, blocks)) - amt
+        if over_read > 0 and blocks:
+            logger.debug('need to reinsert %d data into done chunks',
+                         over_read)
+            last = blocks[-1]
+            blocks[-1], reinsert = last[:-over_read], last[-over_read:]
+            self._done_chunks.insert(0, reinsert)
+        result = ''.join(blocks)
+        assert len(result) == amt or (self._finished and len(result) < amt)
+        return result
+
+    def _load(self, data): # pragma: no cover
+        """Subclasses must implement this.
+
+        As data is available to be read out of this object, it should
+        be placed into the _done_chunks list. Subclasses should not
+        rely on data remaining in _done_chunks forever, as it may be
+        reaped if the client is parsing data as it comes in.
+        """
+        raise NotImplementedError
+
+    def _close(self):
+        """Default implementation of close.
+
+        The default implementation assumes that the reader will mark
+        the response as finished on the _finished attribute once the
+        entire response body has been read. In the event that this is
+        not true, the subclass should override the implementation of
+        close (for example, close-is-end responses have to set
+        self._finished in the close handler.)
+        """
+        if not self._finished:
+            raise HTTPRemoteClosedError(
+                'server appears to have closed the socket mid-response')
+
+
+class AbstractSimpleReader(AbstractReader):
+    """Abstract base class for simple readers that require no response decoding.
+
+    Examples of such responses are Connection: Close (close-is-end)
+    and responses that specify a content length.
+    """
+    def _load(self, data):
+        if data:
+            assert not self._finished, (
+                'tried to add data (%r) to a closed reader!' % data)
+            logger.debug('%s read an additional %d data', self.name, len(data))
+            self._done_chunks.append(data)
+
+
+class CloseIsEndReader(AbstractSimpleReader):
+    """Reader for responses that specify Connection: Close for length."""
+    name = 'close-is-end'
+
+    def _close(self):
+        logger.info('Marking close-is-end reader as closed.')
+        self._finished = True
+
+
+class ContentLengthReader(AbstractSimpleReader):
+    """Reader for responses that specify an exact content length."""
+    name = 'content-length'
+
+    def __init__(self, amount):
+        AbstractReader.__init__(self)
+        self._amount = amount
+        if amount == 0:
+            self._finished = True
+        self._amount_seen = 0
+
+    def _load(self, data):
+        AbstractSimpleReader._load(self, data)
+        self._amount_seen += len(data)
+        if self._amount_seen >= self._amount:
+            self._finished = True
+            logger.debug('content-length read complete')
+
+
+class ChunkedReader(AbstractReader):
+    """Reader for chunked transfer encoding responses."""
+    def __init__(self, eol):
+        AbstractReader.__init__(self)
+        self._eol = eol
+        self._leftover_skip_amt = 0
+        self._leftover_data = ''
+
+    def _load(self, data):
+        assert not self._finished, 'tried to add data to a closed reader!'
+        logger.debug('chunked read an additional %d data', len(data))
+        position = 0
+        if self._leftover_data:
+            logger.debug(
+                'chunked reader trying to finish block from leftover data')
+            # TODO: avoid this string concatenation if possible
+            data = self._leftover_data + data
+            position = self._leftover_skip_amt
+            self._leftover_data = ''
+            self._leftover_skip_amt = 0
+        datalen = len(data)
+        while position < datalen:
+            split = data.find(self._eol, position)
+            if split == -1:
+                self._leftover_data = data
+                self._leftover_skip_amt = position
+                return
+            amt = int(data[position:split], base=16)
+            block_start = split + len(self._eol)
+            # If the whole data chunk plus the eol trailer hasn't
+            # loaded, we'll wait for the next load.
+            if block_start + amt + len(self._eol) > len(data):
+                self._leftover_data = data
+                self._leftover_skip_amt = position
+                return
+            if amt == 0:
+                self._finished = True
+                logger.debug('closing chunked reader due to chunk of length 0')
+                return
+            self._done_chunks.append(data[block_start:block_start + amt])
+            position = block_start + amt + len(self._eol)
+# no-check-code
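A short usage sketch of the reader interface defined above: the connection object pushes raw socket bytes in through _load(), and read() hands decoded body bytes back out, reslicing the buffered chunks as needed. (The import path shown assumes the module is embedded as mercurial.httpclient; the upstream package name is httpplus.)

    from mercurial.httpclient import _readers

    rdr = _readers.ContentLengthReader(10)
    rdr._load('12345')               # bytes arrive from the socket
    assert not rdr.done()            # only 5 of 10 body bytes seen so far
    rdr._load('67890')
    assert rdr.done()                # content-length satisfied
    assert rdr.read(4) == '1234'     # consume in arbitrary slices
    assert rdr.read(6) == '567890'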
--- a/mercurial/httpclient/tests/simple_http_test.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/httpclient/tests/simple_http_test.py Sat May 12 12:23:49 2012 +0200
@@ -29,7 +29,7 @@
 import socket
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -38,7 +38,7 @@
 class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
     def _run_simple_test(self, host, server_data, expected_req,
                          expected_data):
-        con = http.HTTPConnection(host)
+        con = httpplus.HTTPConnection(host)
         con._connect()
         con.sock.data = server_data
         con.request('GET', '/')
@@ -47,9 +47,9 @@
         self.assertEqual(expected_data, con.getresponse().read())
 
     def test_broken_data_obj(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
-        self.assertRaises(http.BadRequestData,
+        self.assertRaises(httpplus.BadRequestData,
                           con.request, 'POST', '/', body=1)
 
     def test_no_keepalive_http_1_0(self):
@@ -74,7 +74,7 @@
 fncache
 dotencode
 """
-        con = http.HTTPConnection('localhost:9999')
+        con = httpplus.HTTPConnection('localhost:9999')
         con._connect()
         con.sock.data = [expected_response_headers, expected_response_body]
         con.request('GET', '/remote/.hg/requires',
@@ -95,7 +95,7 @@
         self.assert_(resp.sock.closed)
 
     def test_multiline_header(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          'Server: BogusServer 1.0\r\n',
@@ -122,7 +122,7 @@
         self.assertEqual(con.sock.closed, False)
 
     def testSimpleRequest(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          'Server: BogusServer 1.0\r\n',
@@ -149,12 +149,13 @@
                          resp.headers.getheaders('server'))
 
     def testHeaderlessResponse(self):
-        con = http.HTTPConnection('1.2.3.4', use_ssl=False)
+        con = httpplus.HTTPConnection('1.2.3.4', use_ssl=False)
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
                          '\r\n'
                          '1234567890'
                          ]
+        con.sock.close_on_empty = True
         con.request('GET', '/')
 
         expected_req = ('GET / HTTP/1.1\r\n'
@@ -169,7 +170,30 @@
         self.assertEqual(resp.status, 200)
 
     def testReadline(self):
-        con = http.HTTPConnection('1.2.3.4')
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: BogusServer 1.0\r\n',
+                         'Connection: Close\r\n',
+                         '\r\n'
+                         '1\n2\nabcdefg\n4\n5']
+        con.sock.close_on_empty = True
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        for expected in ['1\n', '2\n', 'abcdefg\n', '4\n', '5']:
+            actual = r.readline()
+            self.assertEqual(expected, actual,
+                             'Expected %r, got %r' % (expected, actual))
+
+    def testReadlineTrickle(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
         con._connect()
         # make sure it trickles in one byte at a time
         # so that we touch all the cases in readline
@@ -179,6 +203,7 @@
              'Connection: Close\r\n',
             '\r\n'
             '1\n2\nabcdefg\n4\n5']))
+        con.sock.close_on_empty = True
 
         expected_req = ('GET / HTTP/1.1\r\n'
                         'Host: 1.2.3.4\r\n'
@@ -193,6 +218,59 @@
             self.assertEqual(expected, actual,
                              'Expected %r, got %r' % (expected, actual))
 
+    def testVariousReads(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        # make sure it trickles in one byte at a time
+        # so that we touch all the cases in readline
+        con.sock.data = list(''.join(
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             'Connection: Close\r\n',
+             '\r\n'
+             '1\n2',
+             '\na', 'bc',
+             'defg\n4\n5']))
+        con.sock.close_on_empty = True
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        for read_amt, expect in [(1, '1'), (1, '\n'),
+                                 (4, '2\nab'),
+                                 ('line', 'cdefg\n'),
+                                 (None, '4\n5')]:
+            if read_amt == 'line':
+                self.assertEqual(expect, r.readline())
+            else:
+                self.assertEqual(expect, r.read(read_amt))
+
+    def testZeroLengthBody(self):
+        con = httpplus.HTTPConnection('1.2.3.4')
+        con._connect()
+        # make sure it trickles in one byte at a time
+        # so that we touch all the cases in readline
+        con.sock.data = list(''.join(
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             'Content-length: 0\r\n',
+             '\r\n']))
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
+        con.request('GET', '/')
+        self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+        self.assertEqual(expected_req, con.sock.sent)
+        r = con.getresponse()
+        self.assertEqual('', r.read())
+
     def testIPv6(self):
         self._run_simple_test('[::1]:8221',
                               ['HTTP/1.1 200 OK\r\n',
@@ -226,7 +304,7 @@
                               '1234567890')
 
     def testEarlyContinueResponse(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 403 Forbidden\r\n',
@@ -240,8 +318,23 @@
         self.assertEqual("You can't do that.", con.getresponse().read())
         self.assertEqual(sock.closed, True)
 
+    def testEarlyContinueResponseNoContentLength(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        sock = con.sock
+        sock.data = ['HTTP/1.1 403 Forbidden\r\n',
+                     'Server: BogusServer 1.0\r\n',
+                     '\r\n'
+                     "You can't do that."]
+        sock.close_on_empty = True
+        expected_req = self.doPost(con, expect_body=False)
+        self.assertEqual(('1.2.3.4', 80), sock.sa)
+        self.assertStringEqual(expected_req, sock.sent)
+        self.assertEqual("You can't do that.", con.getresponse().read())
+        self.assertEqual(sock.closed, True)
+
     def testDeniedAfterContinueTimeoutExpires(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 403 Forbidden\r\n',
@@ -269,7 +362,7 @@
         self.assertEqual(sock.closed, True)
 
     def testPostData(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -286,7 +379,7 @@
         self.assertEqual(sock.closed, False)
 
     def testServerWithoutContinue(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -302,7 +395,7 @@
         self.assertEqual(sock.closed, False)
 
     def testServerWithSlowContinue(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = 'POST data'
@@ -321,7 +414,7 @@
         self.assertEqual(sock.closed, False)
 
     def testSlowConnection(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         # simulate one byte arriving at a time, to check for various
         # corner cases
@@ -340,12 +433,26 @@
         self.assertEqual(expected_req, con.sock.sent)
         self.assertEqual('1234567890', con.getresponse().read())
 
+    def testCloseAfterNotAllOfHeaders(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: NO CARRIER']
+        con.sock.close_on_empty = True
+        con.request('GET', '/')
+        self.assertRaises(httpplus.HTTPRemoteClosedError,
+                          con.getresponse)
+
+        expected_req = ('GET / HTTP/1.1\r\n'
+                        'Host: 1.2.3.4\r\n'
+                        'accept-encoding: identity\r\n\r\n')
+
     def testTimeout(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = []
         con.request('GET', '/')
-        self.assertRaises(http.HTTPTimeoutException,
+        self.assertRaises(httpplus.HTTPTimeoutException,
                           con.getresponse)
 
         expected_req = ('GET / HTTP/1.1\r\n'
@@ -370,7 +477,7 @@
                 return s
         socket.socket = closingsocket
 
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.request('GET', '/')
         r1 = con.getresponse()
@@ -381,7 +488,7 @@
         self.assertEqual(2, len(sockets))
 
     def test_server_closes_before_end_of_body(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         s = con.sock
         s.data = ['HTTP/1.1 200 OK\r\n',
@@ -393,9 +500,9 @@
         s.close_on_empty = True
         con.request('GET', '/')
         r1 = con.getresponse()
-        self.assertRaises(http.HTTPRemoteClosedError, r1.read)
+        self.assertRaises(httpplus.HTTPRemoteClosedError, r1.read)
 
     def test_no_response_raises_response_not_ready(self):
-        con = http.HTTPConnection('foo')
-        self.assertRaises(http.httplib.ResponseNotReady, con.getresponse)
+        con = httpplus.HTTPConnection('foo')
+        self.assertRaises(httpplus.httplib.ResponseNotReady, con.getresponse)
 # no-check-code
--- a/mercurial/httpclient/tests/test_bogus_responses.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/httpclient/tests/test_bogus_responses.py Sat May 12 12:23:49 2012 +0200
@@ -34,7 +34,7 @@
 """
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -43,7 +43,7 @@
 class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
 
     def bogusEOL(self, eol):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK%s' % eol,
                          'Server: BogusServer 1.0%s' % eol,
--- a/mercurial/httpclient/tests/test_chunked_transfer.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/httpclient/tests/test_chunked_transfer.py Sat May 12 12:23:49 2012 +0200
@@ -29,7 +29,7 @@
 import cStringIO
 import unittest
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
@@ -50,7 +50,7 @@
 class ChunkedTransferTest(util.HttpTestBase, unittest.TestCase):
     def testChunkedUpload(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.read_wait_sentinel = '0\r\n\r\n'
@@ -77,7 +77,7 @@
         self.assertEqual(sock.closed, False)
 
     def testChunkedDownload(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -85,14 +85,31 @@
                      'transfer-encoding: chunked',
                      '\r\n\r\n',
                      chunkedblock('hi '),
-                     chunkedblock('there'),
+                     ] + list(chunkedblock('there')) + [
                      chunkedblock(''),
                      ]
         con.request('GET', '/')
         self.assertStringEqual('hi there', con.getresponse().read())
 
+    def testChunkedDownloadOddReadBoundaries(self):
+        con = httpplus.HTTPConnection('1.2.3.4:80')
+        con._connect()
+        sock = con.sock
+        sock.data = ['HTTP/1.1 200 OK\r\n',
+                     'Server: BogusServer 1.0\r\n',
+                     'transfer-encoding: chunked',
+                     '\r\n\r\n',
+                     chunkedblock('hi '),
+                     ] + list(chunkedblock('there')) + [
+                     chunkedblock(''),
+                     ]
+        con.request('GET', '/')
+        resp = con.getresponse()
+        for amt, expect in [(1, 'h'), (5, 'i the'), (100, 're')]:
+            self.assertEqual(expect, resp.read(amt))
+
     def testChunkedDownloadBadEOL(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\n',
@@ -107,7 +124,7 @@
         self.assertStringEqual('hi there', con.getresponse().read())
 
     def testChunkedDownloadPartialChunkBadEOL(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\n',
@@ -122,7 +139,7 @@
                                con.getresponse().read())
 
     def testChunkedDownloadPartialChunk(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -136,7 +153,7 @@
                                con.getresponse().read())
 
     def testChunkedDownloadEarlyHangup(self):
-        con = http.HTTPConnection('1.2.3.4:80')
+        con = httpplus.HTTPConnection('1.2.3.4:80')
         con._connect()
         sock = con.sock
         broken = chunkedblock('hi'*20)[:-1]
@@ -149,5 +166,5 @@
         sock.close_on_empty = True
         con.request('GET', '/')
         resp = con.getresponse()
-        self.assertRaises(http.HTTPRemoteClosedError, resp.read)
+        self.assertRaises(httpplus.HTTPRemoteClosedError, resp.read)
 # no-check-code
--- a/mercurial/httpclient/tests/test_proxy_support.py Sat May 12 09:43:12 2012 +0200
+++ b/mercurial/httpclient/tests/test_proxy_support.py Sat May 12 12:23:49 2012 +0200
@@ -29,13 +29,13 @@
 import unittest
 import socket
 
-import http
+import httpplus
 
 # relative import to ease embedding the library
 import util
 
 
-def make_preloaded_socket(data):
+def make_preloaded_socket(data, close=False):
     """Make a socket pre-loaded with data so it can be read during
     connect.
 
     Useful for https proxy tests because we have to read from the
@@ -44,6 +44,7 @@
     def s(*args, **kwargs):
         sock = util.MockSocket(*args, **kwargs)
         sock.early_data = data[:]
+        sock.close_on_empty = close
         return sock
     return s
 
@@ -51,7 +52,7 @@
 class ProxyHttpTest(util.HttpTestBase, unittest.TestCase):
 
     def _run_simple_test(self, host, server_data, expected_req, expected_data):
-        con = http.HTTPConnection(host)
+        con = httpplus.HTTPConnection(host)
         con._connect()
         con.sock.data = server_data
         con.request('GET', '/')
@@ -60,7 +61,7 @@
         self.assertEqual(expected_data, con.getresponse().read())
 
     def testSimpleRequest(self):
-        con = http.HTTPConnection('1.2.3.4:80',
+        con = httpplus.HTTPConnection('1.2.3.4:80',
                                   proxy_hostport=('magicproxy', 4242))
         con._connect()
         con.sock.data = ['HTTP/1.1 200 OK\r\n',
@@ -88,7 +89,7 @@
                          resp.headers.getheaders('server'))
 
     def testSSLRequest(self):
-        con = http.HTTPConnection('1.2.3.4:443',
+        con = httpplus.HTTPConnection('1.2.3.4:443',
                                   proxy_hostport=('magicproxy', 4242))
         socket.socket = make_preloaded_socket(
             ['HTTP/1.1 200 OK\r\n',
@@ -124,12 +125,47 @@
         self.assertEqual(['BogusServer 1.0'],
                          resp.headers.getheaders('server'))
 
-    def testSSLProxyFailure(self):
-        con = http.HTTPConnection('1.2.3.4:443',
+    def testSSLRequestNoConnectBody(self):
+        con = httpplus.HTTPConnection('1.2.3.4:443',
                                   proxy_hostport=('magicproxy', 4242))
         socket.socket = make_preloaded_socket(
-            ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n'])
-        self.assertRaises(http.HTTPProxyConnectFailedException, con._connect)
-        self.assertRaises(http.HTTPProxyConnectFailedException,
+            ['HTTP/1.1 200 OK\r\n',
+             'Server: BogusServer 1.0\r\n',
+             '\r\n'])
+        con._connect()
+        con.sock.data = ['HTTP/1.1 200 OK\r\n',
+                         'Server: BogusServer 1.0\r\n',
+                         'Content-Length: 10\r\n',
+                         '\r\n'
+                         '1234567890'
+                         ]
+        connect_sent = con.sock.sent
+        con.sock.sent = ''
+        con.request('GET', '/')
+
+        expected_connect = ('CONNECT 1.2.3.4:443 HTTP/1.0\r\n'
+                            'Host: 1.2.3.4\r\n'
+                            'accept-encoding: identity\r\n'
+                            '\r\n')
+        expected_request = ('GET / HTTP/1.1\r\n'
+                            'Host: 1.2.3.4\r\n'
+                            'accept-encoding: identity\r\n\r\n')
+
+        self.assertEqual(('127.0.0.42', 4242), con.sock.sa)
+        self.assertStringEqual(expected_connect, connect_sent)
+        self.assertStringEqual(expected_request, con.sock.sent)
+        resp = con.getresponse()
+        self.assertEqual(resp.status, 200)
+        self.assertEqual('1234567890', resp.read())
+        self.assertEqual(['BogusServer 1.0'],
+                         resp.headers.getheaders('server'))
+
+    def testSSLProxyFailure(self):
+        con = httpplus.HTTPConnection('1.2.3.4:443',
+                                      proxy_hostport=('magicproxy', 4242))
+        socket.socket = make_preloaded_socket(
+            ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n'], close=True)
+        self.assertRaises(httpplus.HTTPProxyConnectFailedException,
+                          con._connect)
+        self.assertRaises(httpplus.HTTPProxyConnectFailedException,
                           con.request, 'GET', '/')
 # no-check-code
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/httpclient/tests/test_readers.py Sat May 12 12:23:49 2012 +0200 @@ -0,0 +1,70 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from httpplus import _readers + +def chunkedblock(x, eol='\r\n'): + r"""Make a chunked transfer-encoding block. + + >>> chunkedblock('hi') + '2\r\nhi\r\n' + >>> chunkedblock('hi' * 10) + '14\r\nhihihihihihihihihihi\r\n' + >>> chunkedblock('hi', eol='\n') + '2\nhi\n' + """ + return ''.join((hex(len(x))[2:], eol, x, eol)) + +corpus = 'foo\r\nbar\r\nbaz\r\n' + + +class ChunkedReaderTest(unittest.TestCase): + def test_many_block_boundaries(self): + for step in xrange(1, len(corpus)): + data = ''.join(chunkedblock(corpus[start:start+step]) for + start in xrange(0, len(corpus), step)) + for istep in xrange(1, len(data)): + rdr = _readers.ChunkedReader('\r\n') + print 'step', step, 'load', istep + for start in xrange(0, len(data), istep): + rdr._load(data[start:start+istep]) + rdr._load(chunkedblock('')) + self.assertEqual(corpus, rdr.read(len(corpus) + 1)) + + def test_small_chunk_blocks_large_wire_blocks(self): + data = ''.join(map(chunkedblock, corpus)) + chunkedblock('') + rdr = _readers.ChunkedReader('\r\n') + for start in xrange(0, len(data), 4): + d = data[start:start + 4] + if d: + rdr._load(d) + self.assertEqual(corpus, rdr.read(len(corpus)+100)) +# no-check-code
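A minimal sketch of the ChunkedReader contract these new tests pin
down, assuming the in-tree httpplus package is importable:

    from httpplus import _readers

    rdr = _readers.ChunkedReader('\r\n')
    rdr._load('3\r\nfoo\r\n')      # wire data may arrive in arbitrary slices
    rdr._load('0\r\n\r\n')         # the empty chunk terminates the stream
    assert rdr.read(100) == 'foo'  # reads may request more than is buffered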
--- a/mercurial/httpclient/tests/test_ssl.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/httpclient/tests/test_ssl.py Sat May 12 12:23:49 2012 +0200 @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest -import http +import httpplus # relative import to ease embedding the library import util @@ -37,7 +37,7 @@ class HttpSslTest(util.HttpTestBase, unittest.TestCase): def testSslRereadRequired(self): - con = http.HTTPConnection('1.2.3.4:443') + con = httpplus.HTTPConnection('1.2.3.4:443') con._connect() # extend the list instead of assign because of how # MockSSLSocket works. @@ -66,7 +66,7 @@ resp.headers.getheaders('server')) def testSslRereadInEarlyResponse(self): - con = http.HTTPConnection('1.2.3.4:443') + con = httpplus.HTTPConnection('1.2.3.4:443') con._connect() con.sock.early_data = ['HTTP/1.1 200 OK\r\n', 'Server: BogusServer 1.0\r\n',
--- a/mercurial/httpclient/tests/util.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/httpclient/tests/util.py Sat May 12 12:23:49 2012 +0200 @@ -29,7 +29,7 @@ import difflib import socket -import http +import httpplus class MockSocket(object): @@ -57,7 +57,7 @@ self.remote_closed = self.closed = False self.close_on_empty = False self.sent = '' - self.read_wait_sentinel = http._END_HEADERS + self.read_wait_sentinel = httpplus._END_HEADERS def close(self): self.closed = True @@ -86,7 +86,7 @@ @property def ready_for_read(self): - return ((self.early_data and http._END_HEADERS in self.sent) + return ((self.early_data and httpplus._END_HEADERS in self.sent) or (self.read_wait_sentinel in self.sent and self.data) or self.closed or self.remote_closed) @@ -132,7 +132,7 @@ def mocksslwrap(sock, keyfile=None, certfile=None, - server_side=False, cert_reqs=http.socketutil.CERT_NONE, + server_side=False, cert_reqs=httpplus.socketutil.CERT_NONE, ssl_version=None, ca_certs=None, do_handshake_on_connect=True, suppress_ragged_eofs=True): @@ -156,16 +156,16 @@ self.orig_getaddrinfo = socket.getaddrinfo socket.getaddrinfo = mockgetaddrinfo - self.orig_select = http.select.select - http.select.select = mockselect + self.orig_select = httpplus.select.select + httpplus.select.select = mockselect - self.orig_sslwrap = http.socketutil.wrap_socket - http.socketutil.wrap_socket = mocksslwrap + self.orig_sslwrap = httpplus.socketutil.wrap_socket + httpplus.socketutil.wrap_socket = mocksslwrap def tearDown(self): socket.socket = self.orig_socket - http.select.select = self.orig_select - http.socketutil.wrap_socket = self.orig_sslwrap + httpplus.select.select = self.orig_select + httpplus.socketutil.wrap_socket = self.orig_sslwrap socket.getaddrinfo = self.orig_getaddrinfo def assertStringEqual(self, l, r):
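The setUp/tearDown changes extend the same save-and-restore
monkeypatching idiom to the renamed module. The pattern in miniature,
with a hypothetical stand-in getaddrinfo result:

    import socket
    import unittest

    class PatchedTest(unittest.TestCase):
        def setUp(self):
            self.orig_getaddrinfo = socket.getaddrinfo
            socket.getaddrinfo = lambda *args: [
                (socket.AF_INET, socket.SOCK_STREAM, 0, '',
                 ('127.0.0.42', 80))]

        def tearDown(self):
            # restore unconditionally so one test cannot poison the next
            socket.getaddrinfo = self.orig_getaddrinfo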
--- a/mercurial/localrepo.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/localrepo.py Sat May 12 12:23:49 2012 +0200 @@ -41,7 +41,6 @@ self.wopener = scmutil.opener(self.root) self.baseui = baseui self.ui = baseui.copy() - self._dirtyphases = False # A list of callback to shape the phase if no data were found. # Callback are in the form: func(repo, roots) --> processed root. # This list it to be filled by extension during repo setup @@ -182,23 +181,8 @@ bookmarks.write(self) @storecache('phaseroots') - def _phaseroots(self): - self._dirtyphases = False - phaseroots = phases.readroots(self) - phases.filterunknown(self, phaseroots) - return phaseroots - - @propertycache - def _phaserev(self): - cache = [phases.public] * len(self) - for phase in phases.trackedphases: - roots = map(self.changelog.rev, self._phaseroots[phase]) - if roots: - for rev in roots: - cache[rev] = phase - for rev in self.changelog.descendants(*roots): - cache[rev] = phase - return cache + def _phasecache(self): + return phases.phasecache(self, self._phasedefaults) @storecache('00changelog.i') def changelog(self): @@ -505,7 +489,7 @@ partial = self._branchcache self._branchtags(partial, lrev) - # this private cache holds all heads (not just tips) + # this private cache holds all heads (not just the branch tips) self._branchcache = partial def branchmap(self): @@ -585,8 +569,8 @@ latest = newnodes.pop() if latest not in bheads: continue - minbhrev = self[bheads[0]].node() - reachable = self.changelog.reachable(latest, minbhrev) + minbhnode = self[bheads[0]].node() + reachable = self.changelog.reachable(latest, minbhnode) reachable.remove(latest) if reachable: bheads = [b for b in bheads if b not in reachable] @@ -605,10 +589,11 @@ def known(self, nodes): nm = self.changelog.nodemap + pc = self._phasecache result = [] for n in nodes: r = nm.get(n) - resp = not (r is None or self._phaserev[r] >= phases.secret) + resp = not (r is None or pc.phase(self, r) >= phases.secret) result.append(resp) return result @@ -864,7 +849,6 @@ pass delcache('_tagscache') - delcache('_phaserev') self._branchcache = None # in UTF-8 self._branchcachetip = None @@ -932,9 +916,8 @@ def unlock(): self.store.write() - if self._dirtyphases: - phases.writeroots(self) - self._dirtyphases = False + if '_phasecache' in vars(self): + self._phasecache.write() for k, ce in self._filecache.items(): if k == 'dirstate': continue @@ -1334,6 +1317,8 @@ def mfmatches(ctx): mf = ctx.manifest().copy() + if match.always(): + return mf for fn in mf.keys(): if not match(fn): del mf[fn] @@ -1419,10 +1404,11 @@ mf2 = mfmatches(ctx2) modified, added, clean = [], [], [] + withflags = mf1.withflags() | mf2.withflags() for fn in mf2: if fn in mf1: if (fn not in deleted and - (mf1.flags(fn) != mf2.flags(fn) or + ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or (mf1[fn] != mf2[fn] and (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))): modified.append(fn) @@ -1694,7 +1680,7 @@ # * missingheads part of comon (::commonheads) common = set(outgoing.common) cheads = [node for node in revs if node in common] - # and + # and # * commonheads parents on missing revset = self.set('%ln and parents(roots(%ln))', outgoing.commonheads,
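The new unlock hook writes phase data only if the cache was actually
instantiated. That works because storecache/propertycache drop the
computed value into the instance __dict__, so vars(repo) doubles as an
"is it loaded?" test without forcing a load. A self-contained sketch of
the idiom (propertycache below is a minimal stand-in for Mercurial's
util.propertycache):

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            result = self.func(obj)
            obj.__dict__[self.name] = result  # later reads skip the descriptor
            return result

    class repo(object):
        @propertycache
        def _phasecache(self):
            return object()  # stands in for phases.phasecache(self, ...)

    r = repo()
    assert '_phasecache' not in vars(r)  # never loaded: nothing to write
    r._phasecache                        # first access populates vars(r)
    assert '_phasecache' in vars(r)      # now unlock() would call .write()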
--- a/mercurial/manifest.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/manifest.py Sat May 12 12:23:49 2012 +0200 @@ -19,6 +19,8 @@ self._flags = flags def flags(self, f): return self._flags.get(f, "") + def withflags(self): + return set(self._flags.keys()) def set(self, f, flags): self._flags[f] = flags def copy(self):
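withflags() exists so that status() (see the localrepo.py change above)
can restrict per-file flag comparisons to the usually tiny set of files
that carry flags at all. A toy illustration of the bound it provides:

    class manifestdict(dict):
        def __init__(self, mapping, flags):
            dict.__init__(self, mapping)
            self._flags = flags
        def flags(self, f):
            return self._flags.get(f, "")
        def withflags(self):
            return set(self._flags.keys())

    mf1 = manifestdict({'a': 'n1', 'x': 'n2'}, {'x': 'x'})
    mf2 = manifestdict({'a': 'n1', 'x': 'n2'}, {})
    candidates = mf1.withflags() | mf2.withflags()
    assert candidates == set(['x'])  # 'a' never needs a flags() comparison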
--- a/mercurial/match.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/match.py Sat May 12 12:23:49 2012 +0200 @@ -118,6 +118,8 @@ return self._files def anypats(self): return self._anypats + def always(self): + return False class exact(match): def __init__(self, root, cwd, files): @@ -126,6 +128,8 @@ class always(match): def __init__(self, root, cwd): match.__init__(self, root, cwd, []) + def always(self): + return True class narrowmatcher(match): """Adapt a matcher to work on a subdirectory only.
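Only the always matcher answers True, which is what lets callers such
as mfmatches() in localrepo.py skip filtering entirely. A hedged sketch
against the in-tree module:

    from mercurial import match as matchmod

    m = matchmod.always('/repo', '')
    assert m.always()                # the new fast-path predicate

    e = matchmod.exact('/repo', '', ['a'])
    assert not e.always()            # exact matchers still filter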
--- a/mercurial/parsers.c Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/parsers.c Sat May 12 12:23:49 2012 +0200 @@ -13,8 +13,10 @@ #include "util.h" -static int hexdigit(char c) +static inline int hexdigit(const char *p, Py_ssize_t off) { + char c = p[off]; + if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') @@ -32,8 +34,8 @@ static PyObject *unhexlify(const char *str, int len) { PyObject *ret; - const char *c; char *d; + int i; ret = PyBytes_FromStringAndSize(NULL, len / 2); @@ -42,9 +44,9 @@ d = PyBytes_AsString(ret); - for (c = str; c < str + len;) { - int hi = hexdigit(*c++); - int lo = hexdigit(*c++); + for (i = 0; i < len;) { + int hi = hexdigit(str, i++); + int lo = hexdigit(str, i++); *d++ = (hi << 4) | lo; } @@ -506,13 +508,13 @@ return NULL; #define istat(__n, __d) \ - if (PyDict_SetItemString(obj, __d, PyInt_FromLong(self->__n)) == -1) \ + if (PyDict_SetItemString(obj, __d, PyInt_FromSsize_t(self->__n)) == -1) \ goto bail; if (self->added) { Py_ssize_t len = PyList_GET_SIZE(self->added); if (PyDict_SetItemString(obj, "index entries added", - PyInt_FromLong(len)) == -1) + PyInt_FromSsize_t(len)) == -1) goto bail; } @@ -536,7 +538,7 @@ return NULL; } -static inline int nt_level(const char *node, int level) +static inline int nt_level(const char *node, Py_ssize_t level) { int v = node[level>>1]; if (!(level & 1)) @@ -544,6 +546,13 @@ return v & 0xf; } +/* + * Return values: + * + * -4: match is ambiguous (multiple candidates) + * -2: not found + * rest: valid rev + */ static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen) { int level, maxlevel, off; @@ -574,7 +583,8 @@ return -2; off = v; } - return -2; + /* multiple matches against an ambiguous prefix */ + return -4; } static int nt_new(indexObject *self) @@ -638,6 +648,24 @@ return -1; } +static int nt_init(indexObject *self) +{ + if (self->nt == NULL) { + self->ntcapacity = self->raw_length < 4 + ? 4 : self->raw_length / 2; + self->nt = calloc(self->ntcapacity, sizeof(nodetree)); + if (self->nt == NULL) { + PyErr_NoMemory(); + return -1; + } + self->ntlength = 1; + self->ntrev = (int)index_length(self) - 1; + self->ntlookups = 1; + self->ntmisses = 0; + } + return 0; +} + /* * Return values: * @@ -655,19 +683,8 @@ if (rev >= -1) return rev; - if (self->nt == NULL) { - self->ntcapacity = self->raw_length < 4 - ? 4 : self->raw_length / 2; - self->nt = calloc(self->ntcapacity, sizeof(nodetree)); - if (self->nt == NULL) { - PyErr_SetString(PyExc_MemoryError, "out of memory"); - return -3; - } - self->ntlength = 1; - self->ntrev = (int)index_length(self) - 1; - self->ntlookups = 1; - self->ntmisses = 0; - } + if (nt_init(self) == -1) + return -3; /* * For the first handful of lookups, we scan the entire index, @@ -692,10 +709,14 @@ } else { for (rev = self->ntrev - 1; rev >= 0; rev--) { const char *n = index_node(self, rev); - if (n == NULL) + if (n == NULL) { + self->ntrev = rev + 1; return -2; - if (nt_insert(self, n, rev) == -1) + } + if (nt_insert(self, n, rev) == -1) { + self->ntrev = rev + 1; return -3; + } if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) { break; }
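The newly documented contract for nt_find() distinguishes "not found"
from "ambiguous prefix" (-2 versus the new -4), so callers can report a
prefix shared by several nodes instead of claiming it is unknown. A
pure-Python model of the same return values:

    def find(nodes, prefix):
        matches = [rev for rev, node in enumerate(nodes)
                   if node.startswith(prefix)]
        if len(matches) > 1:
            return -4      # match is ambiguous (multiple candidates)
        if not matches:
            return -2      # not found
        return matches[0]  # valid rev

    nodes = ['abc123', 'abd456']
    assert find(nodes, 'abc') == 0
    assert find(nodes, 'ab') == -4
    assert find(nodes, 'ff') == -2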
--- a/mercurial/patch.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/patch.py Sat May 12 12:23:49 2012 +0200 @@ -1040,12 +1040,13 @@ hunk.append(l) return l.rstrip('\r\n') - line = getline(lr, self.hunk) - while line and not line.startswith('literal '): + while True: line = getline(lr, self.hunk) - if not line: - raise PatchError(_('could not extract "%s" binary data') - % self._fname) + if not line: + raise PatchError(_('could not extract "%s" binary data') + % self._fname) + if line.startswith('literal '): + break size = int(line[8:].rstrip()) dec = [] line = getline(lr, self.hunk)
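The rewritten loop fixes an edge case: if the stream was exhausted
before any 'literal ' header appeared -- including on the very first
line -- the old code fell out of the while loop silently and only
crashed later, on int(''[8:]), with a bare ValueError instead of a
PatchError. The old control flow in miniature:

    def old_scan(lines):
        it = iter(lines)
        def getline():
            return next(it, '')  # '' models EOF, like lr.readline()
        line = getline()
        while line and not line.startswith('literal '):
            line = getline()
            if not line:
                raise ValueError('could not extract binary data')
        return line

    assert old_scan([]) == ''  # bug: silent fall-through on early EOF
    assert old_scan(['literal 5']) == 'literal 5'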
--- a/mercurial/phases.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/phases.py Sat May 12 12:23:49 2012 +0200 @@ -99,60 +99,165 @@ """ import errno -from node import nullid, bin, hex, short +from node import nullid, nullrev, bin, hex, short from i18n import _ +import util allphases = public, draft, secret = range(3) trackedphases = allphases[1:] phasenames = ['public', 'draft', 'secret'] -def readroots(repo): - """Read phase roots from disk""" +def _filterunknown(ui, changelog, phaseroots): + """remove unknown nodes from the phase boundary + + Nothing is lost as unknown nodes only hold data for their descendants + """ + updated = False + nodemap = changelog.nodemap # to filter unknown nodes + for phase, nodes in enumerate(phaseroots): + missing = [node for node in nodes if node not in nodemap] + if missing: + for mnode in missing: + ui.debug( + 'removing unknown node %s from %i-phase boundary\n' + % (short(mnode), phase)) + nodes.symmetric_difference_update(missing) + updated = True + return updated + +def _readroots(repo, phasedefaults=None): + """Read phase roots from disk + + phasedefaults is a list of fn(repo, roots) callable, which are + executed if the phase roots file does not exist. When phases are + being initialized on an existing repository, this could be used to + set selected changesets phase to something else than public. + + Return (roots, dirty) where dirty is true if roots differ from + what is being stored. + """ + dirty = False roots = [set() for i in allphases] try: f = repo.sopener('phaseroots') try: for line in f: - phase, nh = line.strip().split() + phase, nh = line.split() roots[int(phase)].add(bin(nh)) finally: f.close() except IOError, inst: if inst.errno != errno.ENOENT: raise - for f in repo._phasedefaults: - roots = f(repo, roots) - repo._dirtyphases = True - return roots + if phasedefaults: + for f in phasedefaults: + roots = f(repo, roots) + dirty = True + if _filterunknown(repo.ui, repo.changelog, roots): + dirty = True + return roots, dirty + +class phasecache(object): + def __init__(self, repo, phasedefaults, _load=True): + if _load: + # Cheap trick to allow shallow-copy without copy module + self.phaseroots, self.dirty = _readroots(repo, phasedefaults) + self.opener = repo.sopener + self._phaserevs = None + + def copy(self): + # Shallow copy meant to ensure isolation in + # advance/retractboundary(), nothing more. + ph = phasecache(None, None, _load=False) + ph.phaseroots = self.phaseroots[:] + ph.dirty = self.dirty + ph.opener = self.opener + ph._phaserevs = self._phaserevs + return ph -def writeroots(repo): - """Write phase roots from disk""" - f = repo.sopener('phaseroots', 'w', atomictemp=True) - try: - for phase, roots in enumerate(repo._phaseroots): - for h in roots: - f.write('%i %s\n' % (phase, hex(h))) - repo._dirtyphases = False - finally: - f.close() + def replace(self, phcache): + for a in 'phaseroots dirty opener _phaserevs'.split(): + setattr(self, a, getattr(phcache, a)) + + def getphaserevs(self, repo, rebuild=False): + if rebuild or self._phaserevs is None: + revs = [public] * len(repo.changelog) + for phase in trackedphases: + roots = map(repo.changelog.rev, self.phaseroots[phase]) + if roots: + for rev in roots: + revs[rev] = phase + for rev in repo.changelog.descendants(*roots): + revs[rev] = phase + self._phaserevs = revs + return self._phaserevs + + def phase(self, repo, rev): + # We need a repo argument here to be able to build _phaserev + # if necessary. 
The repository instance is not stored in + # phasecache to avoid reference cycles. The changelog instance + # is not stored because it is a filecache() property and can + # be replaced without us being notified. + if rev == nullrev: + return public + if self._phaserevs is None or rev >= len(self._phaserevs): + self._phaserevs = self.getphaserevs(repo, rebuild=True) + return self._phaserevs[rev] -def filterunknown(repo, phaseroots=None): - """remove unknown nodes from the phase boundary + def write(self): + if not self.dirty: + return + f = self.opener('phaseroots', 'w', atomictemp=True) + try: + for phase, roots in enumerate(self.phaseroots): + for h in roots: + f.write('%i %s\n' % (phase, hex(h))) + finally: + f.close() + self.dirty = False + + def _updateroots(self, phase, newroots): + self.phaseroots[phase] = newroots + self._phaserevs = None + self.dirty = True + + def advanceboundary(self, repo, targetphase, nodes): + # Be careful to preserve shallow-copied values: do not update + # phaseroots values, replace them. - no data is lost as unknown node only old data for their descentants - """ - if phaseroots is None: - phaseroots = repo._phaseroots - nodemap = repo.changelog.nodemap # to filter unknown nodes - for phase, nodes in enumerate(phaseroots): - missing = [node for node in nodes if node not in nodemap] - if missing: - for mnode in missing: - repo.ui.debug( - 'removing unknown node %s from %i-phase boundary\n' - % (short(mnode), phase)) - nodes.symmetric_difference_update(missing) - repo._dirtyphases = True + delroots = [] # set of root deleted by this path + for phase in xrange(targetphase + 1, len(allphases)): + # filter nodes that are not in a compatible phase already + nodes = [n for n in nodes + if self.phase(repo, repo[n].rev()) >= phase] + if not nodes: + break # no roots to move anymore + olds = self.phaseroots[phase] + roots = set(ctx.node() for ctx in repo.set( + 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes)) + if olds != roots: + self._updateroots(phase, roots) + # some roots may need to be declared for lower phases + delroots.extend(olds - roots) + # declare deleted root in the target phase + if targetphase != 0: + self.retractboundary(repo, targetphase, delroots) + + def retractboundary(self, repo, targetphase, nodes): + # Be careful to preserve shallow-copied values: do not update + # phaseroots values, replace them. + + currentroots = self.phaseroots[targetphase] + newroots = [n for n in nodes + if self.phase(repo, repo[n].rev()) < targetphase] + if newroots: + if nullid in newroots: + raise util.Abort(_('cannot change null revision phase')) + currentroots = currentroots.copy() + currentroots.update(newroots) + ctxs = repo.set('roots(%ln::)', currentroots) + currentroots.intersection_update(ctx.node() for ctx in ctxs) + self._updateroots(targetphase, currentroots) def advanceboundary(repo, targetphase, nodes): """Add nodes to a phase changing other nodes phases if necessary. @@ -161,30 +266,9 @@ in the target phase or kept in a *lower* phase. 
Simplify boundary to contains phase roots only.""" - delroots = [] # set of root deleted by this path - for phase in xrange(targetphase + 1, len(allphases)): - # filter nodes that are not in a compatible phase already - # XXX rev phase cache might have been invalidated by a previous loop - # XXX we need to be smarter here - nodes = [n for n in nodes if repo[n].phase() >= phase] - if not nodes: - break # no roots to move anymore - roots = repo._phaseroots[phase] - olds = roots.copy() - ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes)) - roots.clear() - roots.update(ctx.node() for ctx in ctxs) - if olds != roots: - # invalidate cache (we probably could be smarter here - if '_phaserev' in vars(repo): - del repo._phaserev - repo._dirtyphases = True - # some roots may need to be declared for lower phases - delroots.extend(olds - roots) - # declare deleted root in the target phase - if targetphase != 0: - retractboundary(repo, targetphase, delroots) - + phcache = repo._phasecache.copy() + phcache.advanceboundary(repo, targetphase, nodes) + repo._phasecache.replace(phcache) def retractboundary(repo, targetphase, nodes): """Set nodes back to a phase changing other nodes phases if necessary. @@ -193,22 +277,15 @@ in the target phase or kept in a *higher* phase. Simplify boundary to contains phase roots only.""" - currentroots = repo._phaseroots[targetphase] - newroots = [n for n in nodes if repo[n].phase() < targetphase] - if newroots: - currentroots.update(newroots) - ctxs = repo.set('roots(%ln::)', currentroots) - currentroots.intersection_update(ctx.node() for ctx in ctxs) - if '_phaserev' in vars(repo): - del repo._phaserev - repo._dirtyphases = True - + phcache = repo._phasecache.copy() + phcache.retractboundary(repo, targetphase, nodes) + repo._phasecache.replace(phcache) def listphases(repo): """List phases root for serialisation over pushkey""" keys = {} value = '%i' % draft - for root in repo._phaseroots[draft]: + for root in repo._phasecache.phaseroots[draft]: keys[hex(root)] = value if repo.ui.configbool('phases', 'publish', True): @@ -251,7 +328,7 @@ def visibleheads(repo): """return the set of visible head of this repo""" # XXX we want a cache on this - sroots = repo._phaseroots[secret] + sroots = repo._phasecache.phaseroots[secret] if sroots: # XXX very slow revset. storing heads or secret "boundary" would help. revset = repo.set('heads(not (%ln::))', sroots) @@ -267,7 +344,7 @@ """return a branchmap for the visible set""" # XXX Recomputing this data on the fly is very slow. We should build a # XXX cached version while computin the standard branchmap version. - sroots = repo._phaseroots[secret] + sroots = repo._phasecache.phaseroots[secret] if sroots: vbranchmap = {} for branch, nodes in repo.branchmap().iteritems():
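The repeated "do not update phaseroots values, replace them" comments
are load-bearing: phasecache.copy() duplicates only the outer list
(self.phaseroots[:]), so the inner root sets stay shared between the
working copy and repo._phasecache. Rebinding a slot is isolated;
in-place mutation is not:

    original = [set(['a']), set(['b'])]
    working = original[:]                 # shallow, like phasecache.copy()

    working[1].add('c')                   # WRONG: mutates the shared set...
    assert 'c' in original[1]             # ...and leaks into the original

    working[1] = working[1] | set(['d'])  # rebinding stays isolated
    assert 'd' not in original[1]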
--- a/mercurial/repair.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/repair.py Sat May 12 12:23:49 2012 +0200 @@ -6,7 +6,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -from mercurial import changegroup, bookmarks, phases +from mercurial import changegroup, bookmarks from mercurial.node import short from mercurial.i18n import _ import os @@ -38,14 +38,14 @@ """return the changesets which will be broken by the truncation""" s = set() def collectone(revlog): - links = (revlog.linkrev(i) for i in revlog) + linkgen = (revlog.linkrev(i) for i in revlog) # find the truncation point of the revlog - for lrev in links: + for lrev in linkgen: if lrev >= striprev: break # see if any revision after this point has a linkrev # less than striprev (those will be broken by strip) - for lrev in links: + for lrev in linkgen: if lrev < striprev: s.add(lrev) @@ -170,7 +170,3 @@ raise repo.destroyed() - - # remove potential unknown phase - # XXX using to_strip data would be faster - phases.filterunknown(repo)
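The rename from links to linkgen flags the subtlety collectone()
depends on: both for loops consume the same generator, so the second
loop resumes exactly where the first one broke off rather than
rescanning the revlog. The behavior in miniature:

    linkgen = (lrev for lrev in [0, 1, 5, 2, 7])
    for lrev in linkgen:
        if lrev >= 5:
            break                    # consumed 0, 1, 5; not rewound
    assert list(linkgen) == [2, 7]   # second loop sees the remainder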
--- a/mercurial/revset.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/revset.py Sat May 12 12:23:49 2012 +0200 @@ -289,6 +289,7 @@ - ``pruned`` : csets that are goods, bads or skipped - ``untested`` : csets whose fate is yet unknown - ``ignored`` : csets ignored due to DAG topology + - ``current`` : the cset currently being bisected """ status = getstring(x, _("bisect requires a string")).lower() state = set(hbisect.get(repo, status)) @@ -462,7 +463,26 @@ """``draft()`` Changeset in draft phase.""" getargs(x, 0, 0, _("draft takes no arguments")) - return [r for r in subset if repo._phaserev[r] == phases.draft] + pc = repo._phasecache + return [r for r in subset if pc.phase(repo, r) == phases.draft] + +def extra(repo, subset, x): + """``extra(label, [value])`` + Changesets with the given label in the extra metadata, with the given + optional value.""" + + l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments')) + label = getstring(l[0], _('first argument to extra must be a string')) + value = None + + if len(l) > 1: + value = getstring(l[1], _('second argument to extra must be a string')) + + def _matchvalue(r): + extra = repo[r].extra() + return label in extra and (value is None or value == extra[label]) + + return [r for r in subset if _matchvalue(r)] def filelog(repo, subset, x): """``filelog(pattern)`` @@ -851,7 +871,8 @@ """``public()`` Changeset in public phase.""" getargs(x, 0, 0, _("public takes no arguments")) - return [r for r in subset if repo._phaserev[r] == phases.public] + pc = repo._phasecache + return [r for r in subset if pc.phase(repo, r) == phases.public] def remote(repo, subset, x): """``remote([id [,path]])`` @@ -1030,7 +1051,8 @@ """``secret()`` Changeset in secret phase.""" getargs(x, 0, 0, _("secret takes no arguments")) - return [r for r in subset if repo._phaserev[r] == phases.secret] + pc = repo._phasecache + return [r for r in subset if pc.phase(repo, r) == phases.secret] def sort(repo, subset, x): """``sort(set[, [-]key...])`` @@ -1143,6 +1165,7 @@ "descendants": descendants, "_firstdescendants": _firstdescendants, "draft": draft, + "extra": extra, "file": hasfile, "filelog": filelog, "first": first,
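A hedged usage sketch for the new extra() predicate, through the same
in-process repo.set() entry point the phases code above uses (repo is
assumed to be an open localrepo instance):

    for ctx in repo.set("extra('branch', 'a-b-c-')"):
        print ctx.rev()   # changesets whose extra['branch'] matches exactly

    for ctx in repo.set("extra('source')"):
        print ctx.rev()   # any changeset carrying a 'source' extra key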
--- a/mercurial/tags.py Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/tags.py Sat May 12 12:23:49 2012 +0200 @@ -181,7 +181,7 @@ for line in cachelines: if line == "\n": break - line = line.rstrip().split() + line = line.split() cacherevs.append(int(line[0])) headnode = bin(line[1]) cacheheads.append(headnode)
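The dropped rstrip() is redundant: str.split() with no arguments
already ignores leading and trailing whitespace, including the trailing
newline, so both forms parse a cache line identically:

    line = '12 0045e12f6c57\n'
    assert line.split() == line.strip().split() == ['12', '0045e12f6c57']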
--- a/mercurial/util.h Sat May 12 09:43:12 2012 +0200 +++ b/mercurial/util.h Sat May 12 12:23:49 2012 +0200 @@ -109,6 +109,7 @@ typedef int Py_ssize_t; typedef Py_ssize_t (*lenfunc)(PyObject *); typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t); +#define PyInt_FromSsize_t PyInt_FromLong #if !defined(PY_SSIZE_T_MIN) #define PY_SSIZE_T_MAX INT_MAX
--- a/tests/run-tests.py Sat May 12 09:43:12 2012 +0200 +++ b/tests/run-tests.py Sat May 12 12:23:49 2012 +0200 @@ -1187,6 +1187,7 @@ os.environ['http_proxy'] = '' os.environ['no_proxy'] = '' os.environ['NO_PROXY'] = '' + os.environ['TERM'] = 'xterm' # unset env related to hooks for k in os.environ.keys():
--- a/tests/test-bisect.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-bisect.t Sat May 12 12:23:49 2012 +0200 @@ -224,6 +224,7 @@ Testing changeset 12:1941b52820a5 (23 changesets remaining, ~4 tests) 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ cat .hg/bisect.state + current 1941b52820a544549596820a8ae006842b0e2c64 skip 9d7d07bc967ca98ad0600c24953fd289ad5fa991 skip ce8f0998e922c179e80819d5066fbe46e2998784 skip e7fa0811edb063f6319531f0d0a865882138e180 @@ -396,6 +397,12 @@ date: Thu Jan 01 00:00:06 1970 +0000 summary: msg 6 + $ hg log -r "bisect(current)" + changeset: 5:7874a09ea728 + user: test + date: Thu Jan 01 00:00:05 1970 +0000 + summary: msg 5 + $ hg log -r "bisect(skip)" changeset: 1:5cd978ea5149 user: test @@ -466,3 +473,40 @@ date: Thu Jan 01 00:00:06 1970 +0000 summary: msg 6 + + +test bisecting via a command without updating the working dir, and +ensure that the bisect state file is updated before running a test +command + + $ hg update null + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ cat > script.sh <<'EOF' + > #!/bin/sh + > test -n "$HG_NODE" || (echo HG_NODE missing; exit 127) + > current="`hg log -r \"bisect(current)\" --template {node}`" + > test "$current" = "$HG_NODE" || (echo current is bad: $current; exit 127) + > rev="`hg log -r $HG_NODE --template {rev}`" + > test "$rev" -ge 6 + > EOF + $ chmod +x script.sh + $ hg bisect -r + $ hg bisect --good tip --noupdate + $ hg bisect --bad 0 --noupdate + Testing changeset 15:e7fa0811edb0 (31 changesets remaining, ~4 tests) + $ hg bisect --command "'`pwd`/script.sh' and some params" --noupdate + Changeset 15:e7fa0811edb0: good + Changeset 7:03750880c6b5: good + Changeset 3:b53bea5e2fcb: bad + Changeset 5:7874a09ea728: bad + Changeset 6:a3d5c6fdf0d3: good + The first good revision is: + changeset: 6:a3d5c6fdf0d3 + user: test + date: Thu Jan 01 00:00:06 1970 +0000 + summary: msg 6 + + +ensure that we still don't have a working dir + + $ hg parents
--- a/tests/test-branches.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-branches.t Sat May 12 12:23:49 2012 +0200 @@ -241,6 +241,11 @@ default 0:19709c5a4e75 (inactive) $ hg branches -a a branch name much longer than the default justification used by branches 7:10ff5895aa57 + $ hg branches -q + a branch name much longer than the default justification used by branches + c + a + default $ hg heads b no open branch heads found on branches b [1]
--- a/tests/test-debugcomplete.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-debugcomplete.t Sat May 12 12:23:49 2012 +0200 @@ -247,7 +247,7 @@ debugsub: rev debugwalk: include, exclude debugwireargs: three, four, five, ssh, remotecmd, insecure - graft: continue, edit, currentdate, currentuser, date, user, tool, dry-run + graft: continue, edit, log, currentdate, currentuser, date, user, tool, dry-run grep: print0, all, text, follow, ignore-case, files-with-matches, line-number, rev, user, date, include, exclude heads: rev, topo, active, closed, style, template help: extension, command
--- a/tests/test-graft.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-graft.t Sat May 12 12:23:49 2012 +0200 @@ -72,28 +72,23 @@ # HG changeset patch # User foo # Date 0 0 - # Node ID d2e44c99fd3f31c176ea4efb9eca9f6306c81756 + # Node ID ef0ef43d49e79e81ddafdc7997401ba0041efc82 # Parent 68795b066622ca79a25816a662041d8f78f3cd9e 2 diff --git a/a b/b rename from a rename to b - --- a/a - +++ b/b - @@ -1,1 +1,1 @@ - -a - +b Look for extra:source $ hg log --debug -r tip - changeset: 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756 + changeset: 7:ef0ef43d49e79e81ddafdc7997401ba0041efc82 tag: tip phase: draft parent: 0:68795b066622ca79a25816a662041d8f78f3cd9e parent: -1:0000000000000000000000000000000000000000 - manifest: 7:5d59766436fd8fbcd38e7bebef0f6eaf3eebe637 + manifest: 7:e59b6b228f9cbf9903d5e9abf996e083a1f533eb user: foo date: Thu Jan 01 00:00:00 1970 +0000 files+: b @@ -128,15 +123,20 @@ checking for directory renames resolving manifests overwrite: False, partial: False - ancestor: 68795b066622, local: d2e44c99fd3f+, remote: 5d205f8b35b6 + ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6 b: local copied/moved to a -> m preserving b for resolve of b updating: b 1/1 files (100.00%) + picked tool 'internal:merge' for b (binary False symlink False) + merging b and a to b + my b@ef0ef43d49e7+ other a@5d205f8b35b6 ancestor a@68795b066622 + premerge successful + b grafting revision 5 searching for copies back to rev 1 resolving manifests overwrite: False, partial: False - ancestor: 4c60f11aa304, local: d2e44c99fd3f+, remote: 97f8bfe72746 + ancestor: 4c60f11aa304, local: 6b9e5368ca4e+, remote: 97f8bfe72746 e: remote is newer -> g updating: e 1/1 files (100.00%) getting e @@ -145,7 +145,7 @@ searching for copies back to rev 1 resolving manifests overwrite: False, partial: False - ancestor: 4c60f11aa304, local: 839a7e8fcf80+, remote: 9c233e8e184d + ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d e: versions differ -> m d: remote is newer -> g preserving e for resolve of e @@ -154,7 +154,7 @@ updating: e 2/2 files (100.00%) picked tool 'internal:merge' for e (binary False symlink False) merging e - my e@839a7e8fcf80+ other e@9c233e8e184d ancestor e@68795b066622 + my e@1905859650ec+ other e@9c233e8e184d ancestor e@68795b066622 warning: conflicts during merge. merging e incomplete! 
(edit conflicts, then use 'hg resolve --mark') abort: unresolved conflicts, can't continue @@ -200,11 +200,13 @@ View graph: $ hg --config extensions.graphlog= log -G --template '{author}@{rev}.{phase}: {desc}\n' - @ test@10.draft: 3 + @ test@11.draft: 3 + | + o test@10.draft: 4 | - o test@9.draft: 4 + o test@9.draft: 5 | - o test@8.draft: 5 + o bar@8.draft: 1 | o foo@7.draft: 2 | @@ -232,17 +234,17 @@ grafting revision 7 $ hg log -r 7 --template '{rev}:{node}\n' - 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756 + 7:ef0ef43d49e79e81ddafdc7997401ba0041efc82 $ hg log -r 2 --template '{rev}:{node}\n' 2:5c095ad7e90f871700f02dd1fa5012cb4498a2d4 $ hg log --debug -r tip - changeset: 12:95adbe5de6b10f376b699ece9ed5a57cd7b4b0f6 + changeset: 13:9db0f28fd3747e92c57d015f53b5593aeec53c2d tag: tip phase: draft - parent: 11:b592ea63bb0c19a6c5c44685ee29a2284f9f1b8f + parent: 12:b592ea63bb0c19a6c5c44685ee29a2284f9f1b8f parent: -1:0000000000000000000000000000000000000000 - manifest: 12:9944044f82a462bbaccc9bdf7e0ac5b811db7d1b + manifest: 13:dc313617b8c32457c0d589e0dbbedfe71f3cd637 user: foo date: Thu Jan 01 00:00:00 1970 +0000 files+: b @@ -260,7 +262,7 @@ [255] Disallow grafting already grafted csets with the same origin onto each other - $ hg up -q 12 + $ hg up -q 13 $ hg graft 2 skipping already grafted revision 2 [255] @@ -273,5 +275,15 @@ skipping already grafted revision 2 [255] $ hg graft tip - skipping already grafted revision 12 (same origin 2) + skipping already grafted revision 13 (same origin 2) [255] + +Graft with --log + + $ hg up -Cq 1 + $ hg graft 3 --log -u foo + grafting revision 3 + warning: can't find ancestor for 'c' copied from 'b'! + $ hg log --template '{rev} {parents} {desc}\n' -r tip + 14 1:5d205f8b35b6 3 + (grafted from 4c60f11aa304a54ae1c199feb94e7fc771e51ed8)
--- a/tests/test-keyword.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-keyword.t Sat May 12 12:23:49 2012 +0200 @@ -558,6 +558,7 @@ $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>' c c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292 + removing unknown node 40a904bbbe4c from 1-phase boundary overwriting c expanding keywords committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d $ cat a c @@ -722,6 +723,7 @@ $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>' a + removing unknown node 40a904bbbe4c from 1-phase boundary overwriting a expanding keywords committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83 $ rm log
--- a/tests/test-largefiles.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-largefiles.t Sat May 12 12:23:49 2012 +0200 @@ -432,11 +432,21 @@ large11 $ cat sub/large2 large22 + $ cd .. + +Test cloning with --all-largefiles flag + + $ rm -Rf ${USERCACHE}/* + $ hg clone --all-largefiles a a-backup + updating to branch default + 5 files updated, 0 files merged, 0 files removed, 0 files unresolved + getting changed largefiles + 3 largefiles updated, 0 removed + 8 additional largefiles cached Rebasing between two repositories does not revert largefiles to old revisions (this was a very bad bug that took a lot of work to fix). - $ cd .. $ hg clone a d updating to branch default 5 files updated, 0 files merged, 0 files removed, 0 files unresolved @@ -1136,4 +1146,37 @@ abort: uncommitted changes in subrepo subrepo (use --subrepos for recursive commit) [255] + +Add a normal file to the subrepo, then test archiving + + $ echo 'normal file' > subrepo/normal.txt + $ hg -R subrepo add subrepo/normal.txt + +Lock in subrepo, otherwise the change isn't archived + + $ hg ci -S -m "add normal file to top level" + committing subrepository subrepo + Invoking status precommit hook + M large.txt + A normal.txt + Invoking status precommit hook + M .hgsubstate + $ hg archive -S lf_subrepo_archive + $ find lf_subrepo_archive | sort + lf_subrepo_archive + lf_subrepo_archive/.hg_archival.txt + lf_subrepo_archive/.hgsub + lf_subrepo_archive/.hgsubstate + lf_subrepo_archive/a + lf_subrepo_archive/a/b + lf_subrepo_archive/a/b/c + lf_subrepo_archive/a/b/c/d + lf_subrepo_archive/a/b/c/d/e.large.txt + lf_subrepo_archive/a/b/c/d/e.normal.txt + lf_subrepo_archive/a/b/c/x + lf_subrepo_archive/a/b/c/x/y.normal.txt + lf_subrepo_archive/subrepo + lf_subrepo_archive/subrepo/large.txt + lf_subrepo_archive/subrepo/normal.txt + $ cd ..
--- a/tests/test-mq-qpush-fail.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-mq-qpush-fail.t Sat May 12 12:23:49 2012 +0200 @@ -202,6 +202,42 @@ $ test -f a.orig && echo 'error: backup with --no-backup' [1] +test qpop --check + + $ hg qpush + applying p1 + now at: p1 + $ hg qpop --check --force + abort: cannot use both --force and --check + [255] + $ echo a >> a + $ hg qpop --check + abort: local changes found, refresh first + [255] + $ hg revert -qa a + $ rm a + $ hg qpop --check + abort: local changes found, refresh first + [255] + $ hg rm -A a + $ hg qpop --check + abort: local changes found, refresh first + [255] + $ hg revert -qa a + $ echo b > b + $ hg add b + $ hg qpop --check + abort: local changes found, refresh first + [255] + $ hg forget b + $ echo d > d + $ hg add d + $ hg qpop --check + popping p1 + patch queue now empty + $ hg forget d + $ rm d + test qpush --force and backup files $ echo a >> a @@ -281,3 +317,108 @@ now at: p2 $ test -f a.orig && echo 'error: backup with --no-backup' [1] + +test qpush --check + + $ hg qpush --check --force + abort: cannot use both --force and --check + [255] + $ hg qpush --check --exact + abort: cannot use --exact and --check together + [255] + $ echo b >> b + $ hg qpush --check + applying p3 + errors during apply, please fix and refresh p2 + [2] + $ rm b + $ hg qpush --check + applying p3 + errors during apply, please fix and refresh p2 + [2] + $ hg rm -A b + $ hg qpush --check + applying p3 + errors during apply, please fix and refresh p2 + [2] + $ hg revert -aq b + $ echo d > d + $ hg add d + $ hg qpush --check + applying p3 + errors during apply, please fix and refresh p2 + [2] + $ hg forget d + $ rm d + $ hg qpop + popping p2 + patch queue now empty + $ echo b >> b + $ hg qpush -a --check + applying p2 + applying p3 + errors during apply, please fix and refresh p2 + [2] + $ hg qtop + p2 + $ hg parents --template "{rev} {desc}\n" + 2 imported patch p2 + $ hg st b + M b + $ cat b + b + b + +test qgoto --check + + $ hg revert -aq b + $ rm e + $ hg qgoto --check --force p3 + abort: cannot use both --force and --check + [255] + $ echo a >> a + $ hg qgoto --check p3 + applying p3 + now at: p3 + $ hg st a + M a + $ hg qgoto --check p2 + popping p3 + now at: p2 + $ hg st a + M a + +test mq.check setting + + $ hg --config mq.check=1 qpush + applying p3 + now at: p3 + $ hg st a + M a + $ hg --config mq.check=1 qpop + popping p3 + now at: p2 + $ hg st a + M a + $ hg --config mq.check=1 qgoto p3 + applying p3 + now at: p3 + $ hg st a + M a + $ echo b >> b + $ hg --config mq.check=1 qpop --force + popping p3 + now at: p2 + $ hg st b + $ hg --config mq.check=1 qpush --exact + abort: local changes found, refresh first + [255] + $ hg revert -qa a + $ hg qpop + popping p2 + patch queue now empty + $ echo a >> a + $ hg --config mq.check=1 qpush --force + applying p2 + now at: p2 + $ hg st a
--- a/tests/test-mq.t Sat May 12 09:43:12 2012 +0200
+++ b/tests/test-mq.t Sat May 12 12:23:49 2012 +0200
@@ -59,6 +59,15 @@
   You will by default be managing a patch queue named "patches". You can
   create other, independent patch queues with the "hg qqueue" command.
 
+  If the working directory contains uncommitted files, qpush, qpop and qgoto
+  abort immediately. If -f/--force is used, the changes are discarded. Setting:
+
+  [mq] check = True
+
+  makes them behave as if -c/--check were passed, and non-conflicting local
+  changes will be tolerated and preserved. If incompatible options such as
+  -f/--force or --exact are passed, this setting is ignored.
+
   list of commands:
 
   qapplied      print the patches already applied
--- a/tests/test-phases.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-phases.t Sat May 12 12:23:49 2012 +0200 @@ -9,6 +9,15 @@ $ hg init initialrepo $ cd initialrepo + +Cannot change null revision phase + + $ hg phase --force --secret null + abort: cannot change null revision phase + [255] + $ hg phase null + -1: public + $ mkcommit A New commit are draft by default
--- a/tests/test-rebase-parameters.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-rebase-parameters.t Sat May 12 12:23:49 2012 +0200 @@ -158,12 +158,12 @@ $ cd .. -Rebase with dest == `hg branch` => same as no arguments (from 3 onto 8): +Rebase with dest == branch(.) => same as no arguments (from 3 onto 8): $ hg clone -q -u 3 a a3 $ cd a3 - $ hg rebase --dest `hg branch` + $ hg rebase --dest 'branch(.)' saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob) $ hg tglog
--- a/tests/test-revset.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-revset.t Sat May 12 12:23:49 2012 +0200 @@ -32,6 +32,13 @@ (branches are permanent and global, did you want a bookmark?) $ hg ci -Aqm2 -u Bob + $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n' + 2 + $ hg log -r "extra('branch')" --template '{rev}\n' + 0 + 1 + 2 + $ hg co 1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg branch +a+b+c+
--- a/tests/test-transplant.t Sat May 12 09:43:12 2012 +0200 +++ b/tests/test-transplant.t Sat May 12 12:23:49 2012 +0200 @@ -120,7 +120,25 @@ 1 r2 0 r1 +test same-parent transplant with --log + $ hg clone -r 1 ../t ../sameparent + adding changesets + adding manifests + adding file changes + added 2 changesets with 2 changes to 2 files + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd ../sameparent + $ hg transplant --log -s ../prune 5 + searching for changes + applying e234d668f844 + e234d668f844 transplanted to e07aea8ecf9c + $ hg log --template '{rev} {parents} {desc}\n' + 2 b1 + (transplanted from e234d668f844e1b1a765f01db83a32c0c7bfa170) + 1 r2 + 0 r1 remote transplant $ hg clone -r 1 ../t ../remote