--- a/hgext/infinitepush/__init__.py Sun Oct 06 09:45:02 2019 -0400
+++ b/hgext/infinitepush/__init__.py Sun Oct 06 09:48:39 2019 -0400
@@ -138,48 +138,48 @@
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
configtable = {}
configitem = registrar.configitem(configtable)
configitem(
- 'infinitepush', 'server', default=False,
+ b'infinitepush', b'server', default=False,
)
configitem(
- 'infinitepush', 'storetype', default='',
+ b'infinitepush', b'storetype', default=b'',
)
configitem(
- 'infinitepush', 'indextype', default='',
+ b'infinitepush', b'indextype', default=b'',
)
configitem(
- 'infinitepush', 'indexpath', default='',
+ b'infinitepush', b'indexpath', default=b'',
)
configitem(
- 'infinitepush', 'storeallparts', default=False,
+ b'infinitepush', b'storeallparts', default=False,
)
configitem(
- 'infinitepush', 'reponame', default='',
+ b'infinitepush', b'reponame', default=b'',
)
configitem(
- 'scratchbranch', 'storepath', default='',
+ b'scratchbranch', b'storepath', default=b'',
)
configitem(
- 'infinitepush', 'branchpattern', default='',
+ b'infinitepush', b'branchpattern', default=b'',
)
configitem(
- 'infinitepush', 'pushtobundlestore', default=False,
+ b'infinitepush', b'pushtobundlestore', default=False,
)
configitem(
- 'experimental', 'server-bundlestore-bookmark', default='',
+ b'experimental', b'server-bundlestore-bookmark', default=b'',
)
configitem(
- 'experimental', 'infinitepush-scratchpush', default=False,
+ b'experimental', b'infinitepush-scratchpush', default=False,
)
-experimental = 'experimental'
-configbookmark = 'server-bundlestore-bookmark'
-configscratchpush = 'infinitepush-scratchpush'
+experimental = b'experimental'
+configbookmark = b'server-bundlestore-bookmark'
+configscratchpush = b'infinitepush-scratchpush'
scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
@@ -189,31 +189,31 @@
def _buildexternalbundlestore(ui):
- put_args = ui.configlist('infinitepush', 'put_args', [])
- put_binary = ui.config('infinitepush', 'put_binary')
+ put_args = ui.configlist(b'infinitepush', b'put_args', [])
+ put_binary = ui.config(b'infinitepush', b'put_binary')
if not put_binary:
- raise error.Abort('put binary is not specified')
- get_args = ui.configlist('infinitepush', 'get_args', [])
- get_binary = ui.config('infinitepush', 'get_binary')
+ raise error.Abort(b'put binary is not specified')
+ get_args = ui.configlist(b'infinitepush', b'get_args', [])
+ get_binary = ui.config(b'infinitepush', b'get_binary')
if not get_binary:
- raise error.Abort('get binary is not specified')
+ raise error.Abort(b'get binary is not specified')
from . import store
return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
def _buildsqlindex(ui):
- sqlhost = ui.config('infinitepush', 'sqlhost')
+ sqlhost = ui.config(b'infinitepush', b'sqlhost')
if not sqlhost:
- raise error.Abort(_('please set infinitepush.sqlhost'))
- host, port, db, user, password = sqlhost.split(':')
- reponame = ui.config('infinitepush', 'reponame')
+ raise error.Abort(_(b'please set infinitepush.sqlhost'))
+ host, port, db, user, password = sqlhost.split(b':')
+ reponame = ui.config(b'infinitepush', b'reponame')
if not reponame:
- raise error.Abort(_('please set infinitepush.reponame'))
+ raise error.Abort(_(b'please set infinitepush.reponame'))
- logfile = ui.config('infinitepush', 'logfile', '')
- waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
- locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
+ logfile = ui.config(b'infinitepush', b'logfile', b'')
+ waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
+ locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
from . import sqlindexapi
return sqlindexapi.sqlindexapi(
@@ -231,10 +231,10 @@
def _getloglevel(ui):
- loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
+ loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
numeric_loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_loglevel, int):
- raise error.Abort(_('invalid log level %s') % loglevel)
+ raise error.Abort(_(b'invalid log level %s') % loglevel)
return numeric_loglevel
@@ -248,7 +248,7 @@
'''
if common.isremotebooksenabled(ui):
- hoist = ui.config('remotenames', 'hoistedpeer') + '/'
+ hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
if remotebookmark.startswith(hoist):
return remotebookmark[len(hoist) :]
return remotebookmark
@@ -257,33 +257,33 @@
class bundlestore(object):
def __init__(self, repo):
self._repo = repo
- storetype = self._repo.ui.config('infinitepush', 'storetype')
- if storetype == 'disk':
+ storetype = self._repo.ui.config(b'infinitepush', b'storetype')
+ if storetype == b'disk':
from . import store
self.store = store.filebundlestore(self._repo.ui, self._repo)
- elif storetype == 'external':
+ elif storetype == b'external':
self.store = _buildexternalbundlestore(self._repo.ui)
else:
raise error.Abort(
- _('unknown infinitepush store type specified %s') % storetype
+ _(b'unknown infinitepush store type specified %s') % storetype
)
- indextype = self._repo.ui.config('infinitepush', 'indextype')
- if indextype == 'disk':
+ indextype = self._repo.ui.config(b'infinitepush', b'indextype')
+ if indextype == b'disk':
from . import fileindexapi
self.index = fileindexapi.fileindexapi(self._repo)
- elif indextype == 'sql':
+ elif indextype == b'sql':
self.index = _buildsqlindex(self._repo.ui)
else:
raise error.Abort(
- _('unknown infinitepush index type specified %s') % indextype
+ _(b'unknown infinitepush index type specified %s') % indextype
)
def _isserver(ui):
- return ui.configbool('infinitepush', 'server')
+ return ui.configbool(b'infinitepush', b'server')
def reposetup(ui, repo):
@@ -300,11 +300,11 @@
def commonsetup(ui):
- wireprotov1server.commands['listkeyspatterns'] = (
+ wireprotov1server.commands[b'listkeyspatterns'] = (
wireprotolistkeyspatterns,
- 'namespace patterns',
+ b'namespace patterns',
)
- scratchbranchpat = ui.config('infinitepush', 'branchpattern')
+ scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
if scratchbranchpat:
global _scratchbranchmatcher
kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
@@ -313,53 +313,53 @@
def serverextsetup(ui):
- origpushkeyhandler = bundle2.parthandlermapping['pushkey']
+ origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
def newpushkeyhandler(*args, **kwargs):
bundle2pushkey(origpushkeyhandler, *args, **kwargs)
newpushkeyhandler.params = origpushkeyhandler.params
- bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
+ bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
- orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
+ orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
orighandlephasehandler, *args, **kwargs
)
newphaseheadshandler.params = orighandlephasehandler.params
- bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
+ bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
extensions.wrapfunction(
- localrepo.localrepository, 'listkeys', localrepolistkeys
+ localrepo.localrepository, b'listkeys', localrepolistkeys
)
- wireprotov1server.commands['lookup'] = (
- _lookupwrap(wireprotov1server.commands['lookup'][0]),
- 'key',
+ wireprotov1server.commands[b'lookup'] = (
+ _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
+ b'key',
)
- extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
+ extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
- extensions.wrapfunction(bundle2, 'processparts', processparts)
+ extensions.wrapfunction(bundle2, b'processparts', processparts)
def clientextsetup(ui):
- entry = extensions.wrapcommand(commands.table, 'push', _push)
+ entry = extensions.wrapcommand(commands.table, b'push', _push)
entry[1].append(
(
- '',
- 'bundle-store',
+ b'',
+ b'bundle-store',
None,
- _('force push to go to bundle store (EXPERIMENTAL)'),
+ _(b'force push to go to bundle store (EXPERIMENTAL)'),
)
)
- extensions.wrapcommand(commands.table, 'pull', _pull)
+ extensions.wrapcommand(commands.table, b'pull', _pull)
- extensions.wrapfunction(discovery, 'checkheads', _checkheads)
+ extensions.wrapfunction(discovery, b'checkheads', _checkheads)
wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
partorder = exchange.b2partsgenorder
- index = partorder.index('changeset')
+ index = partorder.index(b'changeset')
partorder.insert(
index, partorder.pop(partorder.index(scratchbranchparttype))
)
@@ -378,14 +378,14 @@
def localrepolistkeys(orig, self, namespace, patterns=None):
- if namespace == 'bookmarks' and patterns:
+ if namespace == b'bookmarks' and patterns:
index = self.bundlestore.index
results = {}
bookmarks = orig(self, namespace)
for pattern in patterns:
results.update(index.getbookmarks(pattern))
- if pattern.endswith('*'):
- pattern = 're:^' + pattern[:-1] + '.*'
+ if pattern.endswith(b'*'):
+ pattern = b're:^' + pattern[:-1] + b'.*'
kind, pat, matcher = stringutil.stringmatcher(pattern)
for bookmark, node in bookmarks.iteritems():
if matcher(bookmark):
@@ -397,21 +397,23 @@
@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
- if not self.capable('pushkey'):
+ if not self.capable(b'pushkey'):
yield {}, None
f = wireprotov1peer.future()
- self.ui.debug('preparing listkeys for "%s"\n' % namespace)
+ self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
yield {
- 'namespace': encoding.fromlocal(namespace),
- 'patterns': wireprototypes.encodelist(patterns),
+ b'namespace': encoding.fromlocal(namespace),
+ b'patterns': wireprototypes.encodelist(patterns),
}, f
d = f.value
- self.ui.debug('received listkey for "%s": %i bytes\n' % (namespace, len(d)))
+ self.ui.debug(
+ b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+ )
yield pushkey.decodekeys(d)
def _readbundlerevs(bundlerepo):
- return list(bundlerepo.revs('bundle()'))
+ return list(bundlerepo.revs(b'bundle()'))
def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
@@ -428,18 +430,18 @@
if not changedfiles:
return bundlecaps
- changedfiles = '\0'.join(changedfiles)
+ changedfiles = b'\0'.join(changedfiles)
newcaps = []
appended = False
for cap in bundlecaps or []:
- if cap.startswith('excludepattern='):
- newcaps.append('\0'.join((cap, changedfiles)))
+ if cap.startswith(b'excludepattern='):
+ newcaps.append(b'\0'.join((cap, changedfiles)))
appended = True
else:
newcaps.append(cap)
if not appended:
# Not found excludepattern cap. Just append it
- newcaps.append('excludepattern=' + changedfiles)
+ newcaps.append(b'excludepattern=' + changedfiles)
return newcaps
@@ -452,14 +454,14 @@
'''
parts = []
- version = '02'
+ version = b'02'
outgoing = discovery.outgoing(
bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
)
- cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
+ cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
cgstream = util.chunkbuffer(cgstream).read()
- cgpart = bundle2.bundlepart('changegroup', data=cgstream)
- cgpart.addparam('version', version)
+ cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
+ cgpart.addparam(b'version', version)
parts.append(cgpart)
return parts
@@ -480,7 +482,7 @@
def _needsrebundling(head, bundlerepo):
- bundleheads = list(bundlerepo.revs('heads(bundle())'))
+ bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
return not (
len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
)
@@ -493,18 +495,18 @@
'''
parts = []
if not _needsrebundling(head, bundlerepo):
- with util.posixfile(bundlefile, "rb") as f:
+ with util.posixfile(bundlefile, b"rb") as f:
unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
if isinstance(unbundler, changegroup.cg1unpacker):
part = bundle2.bundlepart(
- 'changegroup', data=unbundler._stream.read()
+ b'changegroup', data=unbundler._stream.read()
)
- part.addparam('version', '01')
+ part.addparam(b'version', b'01')
parts.append(part)
elif isinstance(unbundler, bundle2.unbundle20):
haschangegroup = False
for part in unbundler.iterparts():
- if part.type == 'changegroup':
+ if part.type == b'changegroup':
haschangegroup = True
newpart = bundle2.bundlepart(part.type, data=part.read())
for key, value in part.params.iteritems():
@@ -513,12 +515,12 @@
if not haschangegroup:
raise error.Abort(
- 'unexpected bundle without changegroup part, '
- + 'head: %s' % hex(head),
- hint='report to administrator',
+ b'unexpected bundle without changegroup part, '
+ + b'head: %s' % hex(head),
+ hint=b'report to administrator',
)
else:
- raise error.Abort('unknown bundle type')
+ raise error.Abort(b'unknown bundle type')
else:
parts = _rebundle(bundlerepo, bundleroots, head)
@@ -539,7 +541,7 @@
if head not in repo.changelog.nodemap:
if head not in nodestobundle:
newbundlefile = common.downloadbundle(repo, head)
- bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
+ bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
bundlerepo = hg.repository(repo.ui, bundlepath)
allbundlestocleanup.append((bundlerepo, newbundlefile))
@@ -576,7 +578,7 @@
pullfrombundlestore = bool(scratchbundles)
wrappedchangegrouppart = False
wrappedlistkeys = False
- oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
+ oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
try:
def _changegrouppart(bundler, *args, **kwargs):
@@ -589,20 +591,20 @@
bundler.addpart(part)
return result
- exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
+ exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
wrappedchangegrouppart = True
def _listkeys(orig, self, namespace):
origvalues = orig(self, namespace)
- if namespace == 'phases' and pullfrombundlestore:
- if origvalues.get('publishing') == 'True':
+ if namespace == b'phases' and pullfrombundlestore:
+ if origvalues.get(b'publishing') == b'True':
# Make repo non-publishing to preserve draft phase
- del origvalues['publishing']
+ del origvalues[b'publishing']
origvalues.update(newphases)
return origvalues
extensions.wrapfunction(
- localrepo.localrepository, 'listkeys', _listkeys
+ localrepo.localrepository, b'listkeys', _listkeys
)
wrappedlistkeys = True
heads = list((set(newheads) | set(heads)) - set(scratchheads))
@@ -611,10 +613,10 @@
)
finally:
if wrappedchangegrouppart:
- exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
+ exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
if wrappedlistkeys:
extensions.unwrapfunction(
- localrepo.localrepository, 'listkeys', _listkeys
+ localrepo.localrepository, b'listkeys', _listkeys
)
return result
@@ -626,64 +628,67 @@
if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
scratchnode = repo.bundlestore.index.getnode(localkey)
if scratchnode:
- return "%d %s\n" % (1, scratchnode)
+ return b"%d %s\n" % (1, scratchnode)
else:
- return "%d %s\n" % (0, 'scratch branch %s not found' % localkey)
+ return b"%d %s\n" % (
+ 0,
+ b'scratch branch %s not found' % localkey,
+ )
else:
try:
r = hex(repo.lookup(localkey))
- return "%d %s\n" % (1, r)
+ return b"%d %s\n" % (1, r)
except Exception as inst:
if repo.bundlestore.index.getbundle(localkey):
- return "%d %s\n" % (1, localkey)
+ return b"%d %s\n" % (1, localkey)
else:
r = stringutil.forcebytestr(inst)
- return "%d %s\n" % (0, r)
+ return b"%d %s\n" % (0, r)
return _lookup
-def _pull(orig, ui, repo, source="default", **opts):
+def _pull(orig, ui, repo, source=b"default", **opts):
opts = pycompat.byteskwargs(opts)
# Copy paste from `pull` command
- source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
+ source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
scratchbookmarks = {}
unfi = repo.unfiltered()
unknownnodes = []
- for rev in opts.get('rev', []):
+ for rev in opts.get(b'rev', []):
if rev not in unfi:
unknownnodes.append(rev)
- if opts.get('bookmark'):
+ if opts.get(b'bookmark'):
bookmarks = []
- revs = opts.get('rev') or []
- for bookmark in opts.get('bookmark'):
+ revs = opts.get(b'rev') or []
+ for bookmark in opts.get(b'bookmark'):
if _scratchbranchmatcher(bookmark):
# rev is not known yet
# it will be fetched with listkeyspatterns next
- scratchbookmarks[bookmark] = 'REVTOFETCH'
+ scratchbookmarks[bookmark] = b'REVTOFETCH'
else:
bookmarks.append(bookmark)
if scratchbookmarks:
other = hg.peer(repo, opts, source)
fetchedbookmarks = other.listkeyspatterns(
- 'bookmarks', patterns=scratchbookmarks
+ b'bookmarks', patterns=scratchbookmarks
)
for bookmark in scratchbookmarks:
if bookmark not in fetchedbookmarks:
raise error.Abort(
- 'remote bookmark %s not found!' % bookmark
+ b'remote bookmark %s not found!' % bookmark
)
scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
revs.append(fetchedbookmarks[bookmark])
- opts['bookmark'] = bookmarks
- opts['rev'] = revs
+ opts[b'bookmark'] = bookmarks
+ opts[b'rev'] = revs
if scratchbookmarks or unknownnodes:
# Set anyincoming to True
extensions.wrapfunction(
- discovery, 'findcommonincoming', _findcommonincoming
+ discovery, b'findcommonincoming', _findcommonincoming
)
try:
# Remote scratch bookmarks will be deleted because remotenames doesn't
@@ -701,12 +706,12 @@
return result
finally:
if scratchbookmarks:
- extensions.unwrapfunction(discovery, 'findcommonincoming')
+ extensions.unwrapfunction(discovery, b'findcommonincoming')
def _readscratchremotebookmarks(ui, repo, other):
if common.isremotebooksenabled(ui):
- remotenamesext = extensions.find('remotenames')
+ remotenamesext = extensions.find(b'remotenames')
remotepath = remotenamesext.activepath(repo.ui, other)
result = {}
# Let's refresh remotenames to make sure we have it up to date
@@ -714,10 +719,10 @@
# and it results in deleting scratch bookmarks. Our best guess how to
# fix it is to use `clearnames()`
repo._remotenames.clearnames()
- for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
+ for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
path, bookname = remotenamesext.splitremotename(remotebookmark)
if path == remotepath and _scratchbranchmatcher(bookname):
- nodes = repo.names['remotebookmarks'].nodes(
+ nodes = repo.names[b'remotebookmarks'].nodes(
repo, remotebookmark
)
if nodes:
@@ -728,7 +733,7 @@
def _saveremotebookmarks(repo, newbookmarks, remote):
- remotenamesext = extensions.find('remotenames')
+ remotenamesext = extensions.find(b'remotenames')
remotepath = remotenamesext.activepath(repo.ui, remote)
branches = collections.defaultdict(list)
bookmarks = {}
@@ -736,14 +741,14 @@
for hexnode, nametype, remote, rname in remotenames:
if remote != remotepath:
continue
- if nametype == 'bookmarks':
+ if nametype == b'bookmarks':
if rname in newbookmarks:
# It's possible if we have a normal bookmark that matches
# scratch branch pattern. In this case just use the current
# bookmark node
del newbookmarks[rname]
bookmarks[rname] = hexnode
- elif nametype == 'branches':
+ elif nametype == b'branches':
# saveremotenames expects 20 byte binary nodes for branches
branches[rname].append(bin(hexnode))
@@ -755,7 +760,7 @@
def _savelocalbookmarks(repo, bookmarks):
if not bookmarks:
return
- with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
+ with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
changes = []
for scratchbook, node in bookmarks.iteritems():
changectx = repo[node]
@@ -770,38 +775,38 @@
def _push(orig, ui, repo, dest=None, *args, **opts):
opts = pycompat.byteskwargs(opts)
- bookmark = opts.get('bookmark')
+ bookmark = opts.get(b'bookmark')
# we only support pushing one infinitepush bookmark at once
if len(bookmark) == 1:
bookmark = bookmark[0]
else:
- bookmark = ''
+ bookmark = b''
oldphasemove = None
overrides = {(experimental, configbookmark): bookmark}
- with ui.configoverride(overrides, 'infinitepush'):
- scratchpush = opts.get('bundle_store')
+ with ui.configoverride(overrides, b'infinitepush'):
+ scratchpush = opts.get(b'bundle_store')
if _scratchbranchmatcher(bookmark):
scratchpush = True
# bundle2 can be sent back after push (for example, bundle2
# containing `pushkey` part to update bookmarks)
- ui.setconfig(experimental, 'bundle2.pushback', True)
+ ui.setconfig(experimental, b'bundle2.pushback', True)
if scratchpush:
# this is an infinitepush, we don't want the bookmark to be applied
# rather that should be stored in the bundlestore
- opts['bookmark'] = []
+ opts[b'bookmark'] = []
ui.setconfig(experimental, configscratchpush, True)
oldphasemove = extensions.wrapfunction(
- exchange, '_localphasemove', _phasemove
+ exchange, b'_localphasemove', _phasemove
)
# Copy-paste from `push` command
- path = ui.paths.getpath(dest, default=('default-push', 'default'))
+ path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
if not path:
raise error.Abort(
- _('default repository not configured!'),
- hint=_("see 'hg help config.paths'"),
+ _(b'default repository not configured!'),
+ hint=_(b"see 'hg help config.paths'"),
)
destpath = path.pushloc or path.loc
# Remote scratch bookmarks will be deleted because remotenames doesn't
@@ -812,7 +817,7 @@
if bookmark and scratchpush:
other = hg.peer(repo, opts, destpath)
fetchedbookmarks = other.listkeyspatterns(
- 'bookmarks', patterns=[bookmark]
+ b'bookmarks', patterns=[bookmark]
)
remotescratchbookmarks.update(fetchedbookmarks)
_saveremotebookmarks(repo, remotescratchbookmarks, destpath)
@@ -825,7 +830,7 @@
"""Prune remote names by removing the bookmarks we don't want anymore,
then writing the result back to disk
"""
- remotenamesext = extensions.find('remotenames')
+ remotenamesext = extensions.find(b'remotenames')
# remotename format is:
# (node, nametype ("branches" or "bookmarks"), remote, name)
@@ -840,23 +845,24 @@
remote_bm_names = [
remotename[name_idx]
for remotename in remotenames
- if remotename[nametype_idx] == "bookmarks"
+ if remotename[nametype_idx] == b"bookmarks"
]
for name in names:
if name not in remote_bm_names:
raise error.Abort(
_(
-                    "infinitepush bookmark '{}' does not exist " "in path '{}'"
-                ).format(name, path)
+                    b"infinitepush bookmark '%s' does not exist "
+                    b"in path '%s'"
+                ) % (name, path)
)
bookmarks = {}
branches = collections.defaultdict(list)
for node, nametype, remote, name in remotenames:
- if nametype == "bookmarks" and name not in names:
+ if nametype == b"bookmarks" and name not in names:
bookmarks[name] = node
- elif nametype == "branches":
+ elif nametype == b"branches":
# saveremotenames wants binary nodes for branches
branches[name].append(bin(node))
@@ -877,22 +883,22 @@
def partgen(pushop, bundler):
bookmark = pushop.ui.config(experimental, configbookmark)
scratchpush = pushop.ui.configbool(experimental, configscratchpush)
- if 'changesets' in pushop.stepsdone or not scratchpush:
+ if b'changesets' in pushop.stepsdone or not scratchpush:
return
if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
return
- pushop.stepsdone.add('changesets')
+ pushop.stepsdone.add(b'changesets')
if not pushop.outgoing.missing:
- pushop.ui.status(_('no changes found\n'))
+ pushop.ui.status(_(b'no changes found\n'))
pushop.cgresult = 0
return
# This parameter tells the server that the following bundle is an
# infinitepush. This let's it switch the part processing to our infinitepush
# code path.
- bundler.addparam("infinitepush", "True")
+ bundler.addparam(b"infinitepush", b"True")
scratchparts = bundleparts.getscratchbranchparts(
pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
@@ -912,15 +918,15 @@
def _getrevs(bundle, oldnode, force, bookmark):
     'extracts and validates the revs to be imported'
-    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
+    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
# new bookmark
if oldnode is None:
return revs
# Fast forward update
- if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
+ if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
return revs
return revs
@@ -929,19 +935,19 @@
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
start = time.time()
- logger(service, eventtype='start', **kwargs)
+ logger(service, eventtype=b'start', **kwargs)
try:
yield
logger(
service,
- eventtype='success',
+ eventtype=b'success',
elapsedms=(time.time() - start) * 1000,
**kwargs
)
except Exception as e:
logger(
service,
- eventtype='failure',
+ eventtype=b'failure',
elapsedms=(time.time() - start) * 1000,
errormsg=str(e),
**kwargs
@@ -950,13 +956,13 @@
def _getorcreateinfinitepushlogger(op):
- logger = op.records['infinitepushlogger']
+ logger = op.records[b'infinitepushlogger']
if not logger:
ui = op.repo.ui
try:
username = procutil.getuser()
except Exception:
- username = 'unknown'
+ username = b'unknown'
# Generate random request id to be able to find all logged entries
# for the same request. Since requestid is pseudo-generated it may
# not be unique, but we assume that (hostname, username, requestid)
@@ -966,13 +972,13 @@
hostname = socket.gethostname()
logger = functools.partial(
ui.log,
- 'infinitepush',
+ b'infinitepush',
user=username,
requestid=requestid,
hostname=hostname,
- reponame=ui.config('infinitepush', 'reponame'),
+ reponame=ui.config(b'infinitepush', b'reponame'),
)
- op.records.add('infinitepushlogger', logger)
+ op.records.add(b'infinitepushlogger', logger)
else:
logger = logger[0]
return logger
@@ -982,14 +988,14 @@
"""stores the incoming bundle coming from push command to the bundlestore
instead of applying on the revlogs"""
- repo.ui.status(_("storing changesets on the bundlestore\n"))
+ repo.ui.status(_(b"storing changesets on the bundlestore\n"))
bundler = bundle2.bundle20(repo.ui)
# processing each part and storing it in bundler
with bundle2.partiterator(repo, op, unbundler) as parts:
for part in parts:
bundlepart = None
- if part.type == 'replycaps':
+ if part.type == b'replycaps':
# This configures the current operation to allow reply parts.
bundle2._processpart(op, part)
else:
@@ -998,15 +1004,15 @@
bundlepart.addparam(key, value)
# Certain parts require a response
- if part.type in ('pushkey', 'changegroup'):
+ if part.type in (b'pushkey', b'changegroup'):
if op.reply is not None:
- rpart = op.reply.newpart('reply:%s' % part.type)
+ rpart = op.reply.newpart(b'reply:%s' % part.type)
rpart.addparam(
- 'in-reply-to', b'%d' % part.id, mandatory=False
+ b'in-reply-to', b'%d' % part.id, mandatory=False
)
- rpart.addparam('return', '1', mandatory=False)
+ rpart.addparam(b'return', b'1', mandatory=False)
- op.records.add(part.type, {'return': 1,})
+ op.records.add(part.type, {b'return': 1,})
if bundlepart:
bundler.addpart(bundlepart)
@@ -1031,24 +1037,24 @@
def processparts(orig, repo, op, unbundler):
# make sure we don't wrap processparts in case of `hg unbundle`
- if op.source == 'unbundle':
+ if op.source == b'unbundle':
return orig(repo, op, unbundler)
# this server routes each push to bundle store
- if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
+ if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
return storetobundlestore(orig, repo, op, unbundler)
- if unbundler.params.get('infinitepush') != 'True':
+ if unbundler.params.get(b'infinitepush') != b'True':
return orig(repo, op, unbundler)
- handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
+ handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
bundler = bundle2.bundle20(repo.ui)
cgparams = None
with bundle2.partiterator(repo, op, unbundler) as parts:
for part in parts:
bundlepart = None
- if part.type == 'replycaps':
+ if part.type == b'replycaps':
# This configures the current operation to allow reply parts.
bundle2._processpart(op, part)
elif part.type == bundleparts.scratchbranchparttype:
@@ -1057,18 +1063,22 @@
# when we upload to the store. Eventually those parameters will
# be put on the actual bundle instead of this part, then we can
# send a vanilla changegroup instead of the scratchbranch part.
- cgversion = part.params.get('cgversion', '01')
- bundlepart = bundle2.bundlepart('changegroup', data=part.read())
- bundlepart.addparam('version', cgversion)
+ cgversion = part.params.get(b'cgversion', b'01')
+ bundlepart = bundle2.bundlepart(
+ b'changegroup', data=part.read()
+ )
+ bundlepart.addparam(b'version', cgversion)
cgparams = part.params
# If we're not dumping all parts into the new bundle, we need to
# alert the future pushkey and phase-heads handler to skip
# the part.
if not handleallparts:
- op.records.add(scratchbranchparttype + '_skippushkey', True)
op.records.add(
- scratchbranchparttype + '_skipphaseheads', True
+ scratchbranchparttype + b'_skippushkey', True
+ )
+ op.records.add(
+ scratchbranchparttype + b'_skipphaseheads', True
)
else:
if handleallparts:
@@ -1081,18 +1091,18 @@
bundlepart.addparam(key, value)
# Certain parts require a response
- if part.type == 'pushkey':
+ if part.type == b'pushkey':
if op.reply is not None:
- rpart = op.reply.newpart('reply:pushkey')
+ rpart = op.reply.newpart(b'reply:pushkey')
rpart.addparam(
- 'in-reply-to', str(part.id), mandatory=False
+                                b'in-reply-to', b'%d' % part.id, mandatory=False
)
- rpart.addparam('return', '1', mandatory=False)
+ rpart.addparam(b'return', b'1', mandatory=False)
else:
bundle2._processpart(op, part)
if handleallparts:
- op.records.add(part.type, {'return': 1,})
+ op.records.add(part.type, {b'return': 1,})
if bundlepart:
bundler.addpart(bundlepart)
@@ -1118,44 +1128,44 @@
def storebundle(op, params, bundlefile):
log = _getorcreateinfinitepushlogger(op)
parthandlerstart = time.time()
- log(scratchbranchparttype, eventtype='start')
+ log(scratchbranchparttype, eventtype=b'start')
index = op.repo.bundlestore.index
store = op.repo.bundlestore.store
- op.records.add(scratchbranchparttype + '_skippushkey', True)
+ op.records.add(scratchbranchparttype + b'_skippushkey', True)
bundle = None
try: # guards bundle
- bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
+ bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
bundle = hg.repository(op.repo.ui, bundlepath)
- bookmark = params.get('bookmark')
- bookprevnode = params.get('bookprevnode', '')
- force = params.get('force')
+ bookmark = params.get(b'bookmark')
+ bookprevnode = params.get(b'bookprevnode', b'')
+ force = params.get(b'force')
if bookmark:
oldnode = index.getnode(bookmark)
else:
oldnode = None
- bundleheads = bundle.revs('heads(bundle())')
+ bundleheads = bundle.revs(b'heads(bundle())')
if bookmark and len(bundleheads) > 1:
raise error.Abort(
- _('cannot push more than one head to a scratch branch')
+ _(b'cannot push more than one head to a scratch branch')
)
revs = _getrevs(bundle, oldnode, force, bookmark)
# Notify the user of what is being pushed
- plural = 's' if len(revs) > 1 else ''
- op.repo.ui.warn(_("pushing %d commit%s:\n") % (len(revs), plural))
+ plural = b's' if len(revs) > 1 else b''
+ op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
maxoutput = 10
for i in range(0, min(len(revs), maxoutput)):
- firstline = bundle[revs[i]].description().split('\n')[0][:50]
- op.repo.ui.warn(" %s %s\n" % (revs[i], firstline))
+ firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
+ op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
if len(revs) > maxoutput + 1:
- op.repo.ui.warn(" ...\n")
- firstline = bundle[revs[-1]].description().split('\n')[0][:50]
- op.repo.ui.warn(" %s %s\n" % (revs[-1], firstline))
+ op.repo.ui.warn(b" ...\n")
+ firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
+ op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
nodesctx = [bundle[rev] for rev in revs]
inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
@@ -1170,21 +1180,21 @@
bookmarknode = nodesctx[-1].hex() if nodesctx else None
key = None
if newheadscount:
- with open(bundlefile, 'rb') as f:
+ with open(bundlefile, b'rb') as f:
bundledata = f.read()
with logservicecall(
- log, 'bundlestore', bundlesize=len(bundledata)
+ log, b'bundlestore', bundlesize=len(bundledata)
):
bundlesizelimit = 100 * 1024 * 1024 # 100 MB
if len(bundledata) > bundlesizelimit:
error_msg = (
- 'bundle is too big: %d bytes. '
- + 'max allowed size is 100 MB'
+ b'bundle is too big: %d bytes. '
+ + b'max allowed size is 100 MB'
)
raise error.Abort(error_msg % (len(bundledata),))
key = store.write(bundledata)
- with logservicecall(log, 'index', newheadscount=newheadscount), index:
+ with logservicecall(log, b'index', newheadscount=newheadscount), index:
if key:
index.addbundle(key, nodesctx)
if bookmark:
@@ -1194,14 +1204,14 @@
)
log(
scratchbranchparttype,
- eventtype='success',
+ eventtype=b'success',
elapsedms=(time.time() - parthandlerstart) * 1000,
)
except Exception as e:
log(
scratchbranchparttype,
- eventtype='failure',
+ eventtype=b'failure',
elapsedms=(time.time() - parthandlerstart) * 1000,
errormsg=str(e),
)
@@ -1213,15 +1223,21 @@
@bundle2.parthandler(
scratchbranchparttype,
- ('bookmark', 'bookprevnode', 'force', 'pushbackbookmarks', 'cgversion'),
+ (
+ b'bookmark',
+ b'bookprevnode',
+ b'force',
+ b'pushbackbookmarks',
+ b'cgversion',
+ ),
)
def bundle2scratchbranch(op, part):
'''unbundle a bundle2 part containing a changegroup to store'''
bundler = bundle2.bundle20(op.repo.ui)
- cgversion = part.params.get('cgversion', '01')
- cgpart = bundle2.bundlepart('changegroup', data=part.read())
- cgpart.addparam('version', cgversion)
+ cgversion = part.params.get(b'cgversion', b'01')
+ cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
+ cgpart.addparam(b'version', cgversion)
bundler.addpart(cgpart)
buf = util.chunkbuffer(bundler.getchunks())
@@ -1244,15 +1260,15 @@
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
- if params.get('pushbackbookmarks'):
- if op.reply and 'pushback' in op.reply.capabilities:
+ if params.get(b'pushbackbookmarks'):
+ if op.reply and b'pushback' in op.reply.capabilities:
params = {
- 'namespace': 'bookmarks',
- 'key': bookmark,
- 'new': newnode,
- 'old': oldnode,
+ b'namespace': b'bookmarks',
+ b'key': bookmark,
+ b'new': newnode,
+ b'old': oldnode,
}
- op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
+ op.reply.newpart(b'pushkey', mandatoryparams=params.iteritems())
def bundle2pushkey(orig, op, part):
@@ -1261,11 +1277,11 @@
The only goal is to skip calling the original function if flag is set.
It's set if infinitepush push is happening.
'''
- if op.records[scratchbranchparttype + '_skippushkey']:
+ if op.records[scratchbranchparttype + b'_skippushkey']:
if op.reply is not None:
- rpart = op.reply.newpart('reply:pushkey')
- rpart.addparam('in-reply-to', str(part.id), mandatory=False)
- rpart.addparam('return', '1', mandatory=False)
+ rpart = op.reply.newpart(b'reply:pushkey')
+            rpart.addparam(b'in-reply-to', b'%d' % part.id, mandatory=False)
+ rpart.addparam(b'return', b'1', mandatory=False)
return 1
return orig(op, part)
@@ -1278,7 +1294,7 @@
It's set if infinitepush push is happening.
'''
- if op.records[scratchbranchparttype + '_skipphaseheads']:
+ if op.records[scratchbranchparttype + b'_skipphaseheads']:
return
return orig(op, part)
@@ -1296,13 +1312,13 @@
return
nodesargs = []
for node in nodes:
- nodesargs.append('--node')
+ nodesargs.append(b'--node')
nodesargs.append(node)
- with open(os.devnull, 'w+b') as devnull:
+ with open(os.devnull, b'w+b') as devnull:
cmdline = [
util.hgexecutable(),
- 'debugfillinfinitepushmetadata',
- '-R',
+ b'debugfillinfinitepushmetadata',
+ b'-R',
root,
] + nodesargs
# Process will run in background. We don't care about the return code