highlight: fixes garbled text in non-UTF-8 environments
This patch treats all files inside the repository as encoded by the
locale's encoding when pygmentizing.
We can assume that most files are written in the locale's encoding,
but the current implementation treats them as UTF-8, and there is
no way to specify the encoding of files.
Current implementation, db7557359636 (issue1341):
1. Convert original `text`, which is treated as UTF-8, to the locale's encoding.
`encoding.tolocal()` is the method to convert from internal UTF-8 to local.
If original `text` is not UTF-8, e.g. Japanese EUC-JP, some characters
become garbled here.
2. pygmentize, with no UnicodeDecodeError.
This patch:
1. Convert original `text`, which is treated as the locale's encoding, to unicode.
Pygments prefers unicode objects over raw str. [1]_
If original `text` is not encoded by locale's encoding, some characters
become garbled here.
2. pygmentize, also with no UnicodeDecodeError :)
3. Convert unicode back to raw str, encoded by the locale's encoding.
.. [1] http://pygments.org/docs/unicode/
--- a/hgext/highlight/highlight.py Mon Aug 31 10:58:33 2009 -0500
+++ b/hgext/highlight/highlight.py Sat Aug 29 15:24:15 2009 +0900
@@ -32,26 +32,27 @@
if util.binary(text):
return
- # avoid UnicodeDecodeError in pygments
- text = encoding.tolocal(text)
+ # Pygments is best used with Unicode strings:
+ # <http://pygments.org/docs/unicode/>
+ text = text.decode(encoding.encoding, 'replace')
# To get multi-line strings right, we can't format line-by-line
try:
- lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
- encoding=encoding.encoding)
+ lexer = guess_lexer_for_filename(fctx.path(), text[:1024])
except (ClassNotFound, ValueError):
try:
- lexer = guess_lexer(text[:1024], encoding=encoding.encoding)
+ lexer = guess_lexer(text[:1024])
except (ClassNotFound, ValueError):
- lexer = TextLexer(encoding=encoding.encoding)
+ lexer = TextLexer()
- formatter = HtmlFormatter(style=style, encoding=encoding.encoding)
+ formatter = HtmlFormatter(style=style)
colorized = highlight(text, lexer, formatter)
# strip wrapping div
colorized = colorized[:colorized.find('\n</pre>')]
colorized = colorized[colorized.find('<pre>')+5:]
- coloriter = iter(colorized.splitlines())
+ coloriter = (s.encode(encoding.encoding, 'replace')
+ for s in colorized.splitlines())
tmpl.filters['colorize'] = lambda x: coloriter.next()
--- a/tests/test-highlight Mon Aug 31 10:58:33 2009 -0500
+++ b/tests/test-highlight Sat Aug 29 15:24:15 2009 +0900
@@ -121,3 +121,28 @@
echo % errors encountered
cat errors.log
+
+cd ..
+hg init eucjp
+cd eucjp
+
+printf '\265\376\n' >> eucjp.txt # Japanese kanji "Kyo"
+
+hg ci -Ama
+
+hgserveget () {
+ "$TESTDIR/killdaemons.py"
+ echo % HGENCODING="$1" hg serve
+ HGENCODING="$1" hg serve -p $HGPORT -d -n test --pid-file=hg.pid -E errors.log
+ cat hg.pid >> $DAEMON_PIDS
+
+ echo % hgweb filerevision, html
+ "$TESTDIR/get-with-headers.py" localhost:$HGPORT "/file/tip/$2" \
+ | grep '<div class="parity0 source">' | $TESTDIR/printrepr.py
+ echo % errors encountered
+ cat errors.log
+}
+
+hgserveget euc-jp eucjp.txt
+hgserveget utf-8 eucjp.txt
+hgserveget us-ascii eucjp.txt
--- a/tests/test-highlight.out Mon Aug 31 10:58:33 2009 -0500
+++ b/tests/test-highlight.out Sat Aug 29 15:24:15 2009 +0900
@@ -538,3 +538,16 @@
/* pygments_style = fruity */
% errors encountered
+adding eucjp.txt
+% HGENCODING=euc-jp hg serve
+% hgweb filerevision, html
+<div class="parity0 source"><a href="#l1" id="l1"> 1</a> \xb5\xfe</div>
+% errors encountered
+% HGENCODING=utf-8 hg serve
+% hgweb filerevision, html
+<div class="parity0 source"><a href="#l1" id="l1"> 1</a> \xef\xbf\xbd\xef\xbf\xbd</div>
+% errors encountered
+% HGENCODING=us-ascii hg serve
+% hgweb filerevision, html
+<div class="parity0 source"><a href="#l1" id="l1"> 1</a> ??</div>
+% errors encountered