changeset 38393:b704da9a9dda
byteify-strings: drop import-line hack
This is ugly, and valid only for Python 3. We'll need to find a different
way if we want to get rid of the code transformer entirely.
| author | Yuya Nishihara <yuya@tcha.org> |
|---|---|
| date | Thu, 31 May 2018 22:31:37 +0900 |
| parents | 9f42e4a83676 |
| children | f701bc936e7f |
| files | contrib/byteify-strings.py |
| diffstat | 1 files changed, 2 insertions(+), 27 deletions(-) |
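For context, the hack being dropped here spliced Mercurial's compatibility imports onto every `from __future__ import` line of a module, joined with `;` rather than a newline, so that line numbers in the transformed code still matched the original source. Below is a minimal sketch of that idea, using plain string splicing instead of the token-level rewrite the real code performed (the import list is copied from the removed lines in the diff):

```python
# A minimal sketch of the dropped trick -- not Mercurial's code, which did
# this at the token level.  The compatibility imports are appended to the
# __future__ line with ';' instead of '\n', so every following line keeps
# its original line number.
src = "from __future__ import absolute_import\nx = 1\n"

compat = ("; from mercurial.pycompat import "
          "delattr, getattr, hasattr, setattr, xrange, open, unicode")

out = []
for line in src.splitlines(keepends=True):
    if line.startswith("from __future__ import"):
        # Splice before the trailing '\n'; no newline is added.
        line = line.rstrip("\n") + compat + "\n"
    out.append(line)

rewritten = "".join(out)
# The physical line count -- and hence every line number -- is unchanged.
assert rewritten.count("\n") == src.count("\n")
print(rewritten)
```

Keeping everything on one physical line is what preserves line numbers, but as the commit message notes, the result is ugly and the injected names are only importable this way under Python 3.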
```diff
--- a/contrib/byteify-strings.py	Thu May 31 22:28:29 2018 +0900
+++ b/contrib/byteify-strings.py	Thu May 31 22:31:37 2018 +0900
@@ -12,7 +12,6 @@
 import argparse
 import contextlib
 import errno
-import io
 import os
 import sys
 import tempfile
@@ -20,7 +19,7 @@
 import tokenize
 
 if True:
-    def replacetokens(tokens, fullname):
+    def replacetokens(tokens):
         """Transform a stream of tokens from raw to Python 3.
 
         Returns a generator of possibly rewritten tokens.
@@ -28,8 +27,6 @@
         The input token list may be mutated as part of processing. However,
         its changes do not necessarily match the output token stream.
         """
-        futureimpline = False
-
         # The following utility functions access the tokens list and i index of
         # the for i, t enumerate(tokens) loop below
         def _isop(j, *o):
@@ -111,28 +108,6 @@
                 yield t._replace(string='b%s' % t.string)
                 continue
 
-            # Insert compatibility imports at "from __future__ import" line.
-            # No '\n' should be added to preserve line numbers.
-            if (t.type == token.NAME and t.string == 'import' and
-                all(u.type == token.NAME for u in tokens[i - 2:i]) and
-                [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
-                futureimpline = True
-            if t.type == token.NEWLINE and futureimpline:
-                futureimpline = False
-                if fullname == 'mercurial.pycompat':
-                    yield t
-                    continue
-                r, c = t.start
-                l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, xrange, '
-                     b'open, unicode\n')
-                for u in tokenize.tokenize(io.BytesIO(l).readline):
-                    if u.type in (tokenize.ENCODING, token.ENDMARKER):
-                        continue
-                    yield u._replace(
-                        start=(r, c + u.start[1]), end=(r, c + u.end[1]))
-                continue
-
             # This looks like a function call.
             if t.type == token.NAME and _isop(i + 1, '('):
                 fn = t.string
@@ -163,7 +138,7 @@
 
 def process(fin, fout):
     tokens = tokenize.tokenize(fin.readline)
-    tokens = replacetokens(list(tokens), fullname='<dummy>')
+    tokens = replacetokens(list(tokens))
     fout.write(tokenize.untokenize(tokens))
 
 def tryunlink(fname):
```
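What survives the change is the core transformation visible above in `yield t._replace(string='b%s' % t.string)`: walk the token stream and re-emit string literals with a `b` prefix, then let `tokenize.untokenize()` rebuild the source. Here is a minimal self-contained sketch of that round trip, simplified from the real script (which also special-cases existing prefixes, attribute names, and known function calls):

```python
# Simplified sketch of the byteify round trip; the real contrib script
# handles many more cases (r''/u'' prefixes, attributes, function calls).
import io
import token
import tokenize

def byteify(source_bytes):
    """Prefix plain string literals with b'' and rebuild the source."""
    tokens = tokenize.tokenize(io.BytesIO(source_bytes).readline)
    out = []
    for t in tokens:
        if t.type == token.STRING and not t.string.startswith(("b", "B")):
            # Same rewrite as the diff above: keep the recorded start/end
            # positions and only change the token's text.
            t = t._replace(string='b%s' % t.string)
        out.append(t)
    return tokenize.untokenize(out)

print(byteify(b"x = 'hello'\n").decode('utf-8'))  # -> x = b'hello'
```

Because `untokenize()` places tokens according to their recorded positions, the one-character-longer literals still yield valid Python; inter-token whitespace can drift slightly, but the overall layout is preserved.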