view contrib/byteify-strings.py @ 51977:42a116f1cdc1
branchmap-v3: introduce a "stop_rev" argument to `headsrevs`
The `headsrevs` method of the revlog already has a `revs` argument to compute
the head revisions of a limited set of revisions. However, it disables the use
of the native compiled code to compute the heads, which slows down the
branchmap v3 code a lot.

The branchmap v3 usage is actually quite constrained, as we always only ignore
a part at the top of the graph. So we could be significantly faster.

We start by making a small change to the Python side to improve the situation
and introduce the new API. More collaboration with the native code is coming
later.

This massively speeds up the operation and closes most of the remaining gap
between branchmap-v3 and branchmap-v2, especially on repositories with many
revisions, like mozilla-try. A small overhead remains, mostly because the
`headrevs` logic currently has some inefficiencies. We will look into them
from there.
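To illustrate the shape of a `stop_rev`-bounded heads computation, here is a
minimal sketch. It is not Mercurial's implementation: `headrevs_up_to` and
`parentsfn` are illustrative names. The point is that restricting the scan to
revisions numbered below a cutoff keeps the set ancestor-closed (parents always
have lower revision numbers than their children) and keeps the algorithm a
single linear pass, which is the kind of loop a native fast path can take over.

# Minimal sketch of a "stop_rev"-bounded heads computation (illustrative
# names, not Mercurial's actual code).
def headrevs_up_to(parentsfn, stop_rev):
    """Return the heads among revisions [0, stop_rev).

    parentsfn(rev) must return the parent revision numbers of `rev`
    (with -1 standing in for a missing parent).
    """
    is_head = [True] * stop_rev
    for rev in range(stop_rev):
        for parent in parentsfn(rev):
            if 0 <= parent < stop_rev:
                # `parent` has a descendant in the considered range,
                # so it cannot be a head.
                is_head[parent] = False
    return [rev for rev in range(stop_rev) if is_head[rev]]


# Example: 0 <- 1 <- 2 and 0 <- 3; ignoring everything from rev 3 up
# leaves a single head, rev 2.
_parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1)}
assert headrevs_up_to(_parents.__getitem__, 3) == [2]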
### benchmark.name = hg.command.unbundle
# bin-env-vars.hg.py-re2-module = default
# benchmark.variants.issue6528 = disabled
# benchmark.variants.resource-usage = default
# benchmark.variants.reuse-external-delta-parent = yes
# benchmark.variants.revs = any-1-extra-rev
# benchmark.variants.source = unbundle
# benchmark.variants.validate = default
# benchmark.variants.verbosity = quiet
## data-env-vars.name = netbeans-2018-08-01-zstd-sparse-revlog
# bin-env-vars.hg.flavor = default
branch-v2: 0.233711 ~~~~~
branch-v3 before: 0.368769 (+57.79%, +0.14)
branch-v3 after: 0.239857 (+2.63%, +0.01)
# bin-env-vars.hg.flavor = rust
branch-v2: 0.235230 ~~~~~
branch-v3 before: 0.372460 (+58.34%, +0.14)
branch-v3 after: 0.240972 (+2.44%, +0.01)
## data-env-vars.name = netbeans-2018-08-01-ds2-pnm
# bin-env-vars.hg.flavor = rust
branch-v2: 0.255586 ~~~~~
branch-v3 before: 0.318907 (+24.78%, +0.06)
branch-v3 after: 0.268560 (+5.08%, +0.01)
## data-env-vars.name = mozilla-central-2024-03-22-zstd-sparse-revlog
# bin-env-vars.hg.flavor = default
branch-v2: 0.339010 ~~~~~
branch-v3 before: 0.349752 (+3.17%, +0.01)
branch-v3 after: 0.349389 (+3.06%, +0.01)
# bin-env-vars.hg.flavor = rust
branch-v2: 0.346525 ~~~~~
branch-v3 before: 0.354300 (+2.24%, +0.01)
branch-v3 after: 0.355661 (+2.64%, +0.01)
## data-env-vars.name = mozilla-central-2024-03-22-ds2-pnm
# bin-env-vars.hg.flavor = rust
branch-v2: 0.380202 ~~~~~
branch-v3 before: 0.396293 (+4.23%, +0.02)
branch-v3 after: 0.408851 (+7.54%, +0.03)
## data-env-vars.name = mozilla-unified-2024-03-22-zstd-sparse-revlog
# bin-env-vars.hg.flavor = default
branch-v2: 0.412165 ~~~~~
branch-v3 before: 0.424769 (+3.06%, +0.01)
branch-v3 after: 0.427782 (+3.79%, +0.02)
# bin-env-vars.hg.flavor = rust
branch-v2: 0.412397 ~~~~~
branch-v3 before: 0.421796 (+2.28%, +0.01)
branch-v3 after: 0.422354 (+2.41%, +0.01)
## data-env-vars.name = mozilla-unified-2024-03-22-ds2-pnm
# bin-env-vars.hg.flavor = rust
branch-v2: 0.429501 ~~~~~
branch-v3 before: 0.443849 (+3.34%, +0.01)
branch-v3 after: 0.443197 (+3.19%, +0.01)
## data-env-vars.name = mozilla-try-2024-03-26-zstd-sparse-revlog
# bin-env-vars.hg.flavor = default
branch-v2: 3.403171 ~~~~~
branch-v3 before: 6.234055 (+83.18%, +2.83)
branch-v3 after: 3.819477 (+12.23%, +0.42)
# bin-env-vars.hg.flavor = rust
branch-v2: 3.454876 ~~~~~
branch-v3 before: 6.307813 (+82.58%, +2.85)
branch-v3 after: 3.590284 (+3.92%, +0.14)
## data-env-vars.name = mozilla-try-2024-03-26-ds2-pnm
# bin-env-vars.hg.flavor = rust
branch-v2: 3.465435 ~~~~~
branch-v3 before: 5.176076 (+49.36%, +1.71)
branch-v3 after: 3.633278 (+4.84%, +0.17)
| author   | Pierre-Yves David <pierre-yves.david@octobus.net> |
|----------|---------------------------------------------------|
| date     | Tue, 03 Sep 2024 11:11:17 +0200 |
| parents  | 8250ecb53f30 |
| children | |
#!/usr/bin/env python3
#
# byteify-strings.py - transform string literals to be Python 3 safe
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import argparse
import contextlib
import errno
import os
import sys
import tempfile
import token
import tokenize


def adjusttokenpos(t, ofs):
    """Adjust start/end column of the given token"""
    return t._replace(
        start=(t.start[0], t.start[1] + ofs), end=(t.end[0], t.end[1] + ofs)
    )


def replacetokens(tokens, opts):
    """Transform a stream of tokens from raw to Python 3.

    Returns a generator of possibly rewritten tokens.

    The input token list may be mutated as part of processing. However, its
    changes do not necessarily match the output token stream.
    """
    sysstrtokens = set()

    # The following utility functions access the tokens list and i index of
    # the for i, t enumerate(tokens) loop below
    def _isop(j, *o):
        """Assert that tokens[j] is an OP with one of the given values"""
        try:
            return tokens[j].type == token.OP and tokens[j].string in o
        except IndexError:
            return False

    def _findargnofcall(n):
        """Find arg n of a call expression (start at 0)

        Returns index of the first token of that argument, or None if there
        is not that many arguments.

        Assumes that token[i + 1] is '('.
        """
        nested = 0
        for j in range(i + 2, len(tokens)):
            if _isop(j, ')', ']', '}'):
                # end of call, tuple, subscription or dict / set
                nested -= 1
                if nested < 0:
                    return None
            elif n == 0:
                # this is the starting position of arg
                return j
            elif _isop(j, '(', '[', '{'):
                nested += 1
            elif _isop(j, ',') and nested == 0:
                n -= 1

        return None

    def _ensuresysstr(j):
        """Make sure the token at j is a system string

        Remember the given token so the string transformer won't add
        the byte prefix.

        Ignores tokens that are not strings. Assumes bounds checking has
        already been done.
        """
        k = j
        currtoken = tokens[k]
        while currtoken.type in (token.STRING, token.NEWLINE, tokenize.NL):
            k += 1
            if currtoken.type == token.STRING and currtoken.string.startswith(
                ("'", '"')
            ):
                sysstrtokens.add(currtoken)
            try:
                currtoken = tokens[k]
            except IndexError:
                break

    def _isitemaccess(j):
        """Assert the next tokens form an item access on `tokens[j]` and that
        `tokens[j]` is a name.
        """
        try:
            return (
                tokens[j].type == token.NAME
                and _isop(j + 1, '[')
                and tokens[j + 2].type == token.STRING
                and _isop(j + 3, ']')
            )
        except IndexError:
            return False

    def _ismethodcall(j, *methodnames):
        """Assert the next tokens form a call to `methodname` with a string
        as first argument on `tokens[j]` and that `tokens[j]` is a name.
        """
        try:
            return (
                tokens[j].type == token.NAME
                and _isop(j + 1, '.')
                and tokens[j + 2].type == token.NAME
                and tokens[j + 2].string in methodnames
                and _isop(j + 3, '(')
                and tokens[j + 4].type == token.STRING
            )
        except IndexError:
            return False

    coldelta = 0  # column increment for new opening parens
    coloffset = -1  # column offset for the current line (-1: TBD)
    parens = [(0, 0, 0, -1)]  # stack of (line, end-column, column-offset, type)
    ignorenextline = False  # don't transform the next line
    insideignoreblock = False  # don't transform until turned off

    for i, t in enumerate(tokens):
        # Compute the column offset for the current line, such that
        # the current line will be aligned to the last opening paren
        # as before.
        if coloffset < 0:
            lastparen = parens[-1]
            if t.start[1] == lastparen[1]:
                coloffset = lastparen[2]
            elif t.start[1] + 1 == lastparen[1] and lastparen[3] not in (
                token.NEWLINE,
                tokenize.NL,
            ):
                # fix misaligned indent of s/util.Abort/error.Abort/
                coloffset = lastparen[2] + (lastparen[1] - t.start[1])
            else:
                coloffset = 0

        # Reset per-line attributes at EOL.
        if t.type in (token.NEWLINE, tokenize.NL):
            yield adjusttokenpos(t, coloffset)
            coldelta = 0
            coloffset = -1
            if not insideignoreblock:
                ignorenextline = (
                    tokens[i - 1].type == token.COMMENT
                    and tokens[i - 1].string == "# no-py3-transform"
                )
            continue

        if t.type == token.COMMENT:
            if t.string == "# py3-transform: off":
                insideignoreblock = True
            if t.string == "# py3-transform: on":
                insideignoreblock = False

        if ignorenextline or insideignoreblock:
            yield adjusttokenpos(t, coloffset)
            continue

        # Remember the last paren position.
        if _isop(i, '(', '[', '{'):
            parens.append(t.end + (coloffset + coldelta, tokens[i + 1].type))
        elif _isop(i, ')', ']', '}'):
            parens.pop()

        # Convert most string literals to byte literals. String literals
        # in Python 2 are bytes. String literals in Python 3 are unicode.
        # Most strings in Mercurial are bytes and unicode strings are rare.
        # Rather than rewrite all string literals to use ``b''`` to indicate
        # byte strings, we apply this token transformer to insert the ``b``
        # prefix nearly everywhere.
        if t.type == token.STRING and t not in sysstrtokens:
            s = t.string

            # Preserve docstrings as string literals. This is inconsistent
            # with regular unprefixed strings. However, the
            # "from __future__" parsing (which allows a module docstring to
            # exist before it) doesn't properly handle the docstring if it
            # is b''' prefixed, leading to a SyntaxError. We leave all
            # docstrings as unprefixed to avoid this. This means Mercurial
            # components touching docstrings need to handle unicode,
            # unfortunately.
            if s[0:3] in ("'''", '"""'):
                # If it's assigned to something, it's not a docstring
                if not _isop(i - 1, '='):
                    yield adjusttokenpos(t, coloffset)
                    continue

            # If the first character isn't a quote, it is likely a string
            # prefixing character (such as 'b', 'u', or 'r'. Ignore.
            if s[0] not in ("'", '"'):
                yield adjusttokenpos(t, coloffset)
                continue

            # String literal. Prefix to make a b'' string.
            yield adjusttokenpos(t._replace(string='b%s' % t.string), coloffset)
            coldelta += 1
            continue

        # This looks like a function call.
        if t.type == token.NAME and _isop(i + 1, '('):
            fn = t.string

            # *attr() builtins don't accept byte strings to 2nd argument.
            if fn in (
                'getattr',
                'setattr',
                'hasattr',
                'safehasattr',
                'wrapfunction',
                'wrapclass',
                'addattr',
            ):
                arg1idx = _findargnofcall(1)
                if arg1idx is not None:
                    _ensuresysstr(arg1idx)

            # .encode() and .decode() on str/bytes/unicode don't accept
            # byte strings on Python 3.
            elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                for argn in range(2):
                    argidx = _findargnofcall(argn)
                    if argidx is not None:
                        _ensuresysstr(argidx)

            # It changes iteritems/values to items/values as they are not
            # present in Python 3 world.
            elif opts['dictiter'] and fn in ('iteritems', 'itervalues'):
                yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                continue

        if t.type == token.NAME and t.string in opts['treat-as-kwargs']:
            if _isitemaccess(i):
                _ensuresysstr(i + 2)
            if _ismethodcall(i, 'get', 'pop', 'setdefault', 'popitem'):
                _ensuresysstr(i + 4)

        # Looks like "if __name__ == '__main__'".
        if (
            t.type == token.NAME
            and t.string == '__name__'
            and _isop(i + 1, '==')
        ):
            _ensuresysstr(i + 2)

        # Emit unmodified token.
        yield adjusttokenpos(t, coloffset)


def process(fin, fout, opts):
    tokens = tokenize.tokenize(fin.readline)
    tokens = replacetokens(list(tokens), opts)
    fout.write(tokenize.untokenize(tokens))


def tryunlink(fname):
    try:
        os.unlink(fname)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise


@contextlib.contextmanager
def editinplace(fname):
    n = os.path.basename(fname)
    d = os.path.dirname(fname)
    fp = tempfile.NamedTemporaryFile(
        prefix='.%s-' % n, suffix='~', dir=d, delete=False
    )
    try:
        yield fp
        fp.close()
        if os.name == 'nt':
            tryunlink(fname)
        os.rename(fp.name, fname)
    finally:
        fp.close()
        tryunlink(fp.name)


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--version', action='version', version='Byteify strings 1.0'
    )
    ap.add_argument(
        '-i',
        '--inplace',
        action='store_true',
        default=False,
        help='edit files in place',
    )
    ap.add_argument(
        '--dictiter',
        action='store_true',
        default=False,
        help='rewrite iteritems() and itervalues()',
    ),
    ap.add_argument(
        '--treat-as-kwargs',
        nargs="+",
        default=[],
        help="ignore kwargs-like objects",
    ),
    ap.add_argument('files', metavar='FILE', nargs='+', help='source file')
    args = ap.parse_args()
    opts = {
        'dictiter': args.dictiter,
        'treat-as-kwargs': set(args.treat_as_kwargs),
    }
    for fname in args.files:
        fname = os.path.realpath(fname)
        if args.inplace:
            with editinplace(fname) as fout:
                with open(fname, 'rb') as fin:
                    process(fin, fout, opts)
        else:
            with open(fname, 'rb') as fin:
                fout = sys.stdout.buffer
                process(fin, fout, opts)


if __name__ == '__main__':
    if sys.version_info[0:2] < (3, 7):
        print('This script must be run under Python 3.7+')
        sys.exit(3)
    main()
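As a brief before/after illustration of the transformer above (the `greet`
function is a made-up example, not from the Mercurial tree, and the "output"
shown is what the token rules described in the source imply): plain string
literals gain a `b` prefix, docstrings are left unprefixed, and with
`--dictiter` calls to `iteritems`/`itervalues` are renamed.

# Input (Python 2 era style):
def greet(names):
    """Return greetings keyed by name."""  # docstring stays unprefixed
    return {k: 'hello %s' % v for k, v in names.iteritems()}

# Output after running byteify-strings.py with --dictiter (illustrative):
def greet(names):
    """Return greetings keyed by name."""
    return {k: b'hello %s' % v for k, v in names.items()}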