view mercurial/encoding.py @ 12570:a72c5ff1260c stable

Correct Content-Type header values for archive downloads.

The content type for both .tar.gz and .tar.bz2 downloads was application/x-tar, which is correct for .tar files when no Content-Encoding is present, but not for .tar.gz and .tar.bz2 files unless Content-Encoding is set to gzip or x-bzip2, respectively. However, setting Content-Encoding causes browsers to undo that encoding during download, even though a .gz or .bz2 file is usually the desired artifact; omitting the Content-Encoding header is preferred to avoid having browsers uncompress non-renderable files. Additionally, the Content-Disposition line indicates a final desired filename with a .tar.gz or .tar.bz2 extension, which makes providing a Content-Encoding header inappropriate.

With the current configuration, browsers (Chrome and Firefox thus far) register the application/x-tar Content-Type, notice the missing .tar extension, and append that extension, yielding filename.tar.gz.tar as the final on-disk artifact. This was originally reported here: http://stackoverflow.com/questions/3753659

I've changed the .tar.gz and .tar.bz2 Content-Type values to application/x-gzip and application/x-bzip2, respectively, which yields correctly named download artifacts on Firefox, Chrome, and IE.
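A minimal sketch of the resulting header choices, assuming a WSGI-style list of header tuples; the names ARCHIVE_TYPES and archive_headers are hypothetical illustrations, not the actual hgweb code:

    # Illustrative only: hypothetical helper, not the hgweb implementation.
    ARCHIVE_TYPES = {
        'gz':  ('application/x-gzip', '.tar.gz'),
        'bz2': ('application/x-bzip2', '.tar.bz2'),
        'zip': ('application/zip', '.zip'),
    }

    def archive_headers(reponame, kind):
        """Build download headers for an archive of the given kind."""
        mimetype, ext = ARCHIVE_TYPES[kind]
        filename = reponame + ext
        return [
            ('Content-Type', mimetype),
            # No Content-Encoding: the compressed file itself is the desired
            # artifact, so the browser must not transparently decompress it.
            ('Content-Disposition', 'attachment; filename=%s' % filename),
        ]

For example, archive_headers('hg-stable', 'gz') would advertise application/x-gzip and suggest hg-stable.tar.gz, with no Content-Encoding for the browser to undo.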
author Ry4an Brase <ry4an-hg@ry4an.org>
date Mon, 20 Sep 2010 14:56:08 -0500
parents 2be70ca17311
children c327bfa5e831
line source

# encoding.py - character transcoding support for Mercurial
#
#  Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import error
import sys, unicodedata, locale, os

def _getpreferredencoding():
    '''
    On darwin, getpreferredencoding ignores the locale environment and
    always returns mac-roman. http://bugs.python.org/issue6202 fixes this
    for Python 2.7 and up. This is the same corrected code for earlier
    Python versions.

    However, we can't use a version check for this method, as some
    distributions patch Python to fix this. Instead, we use it as a 'fixer'
    for the mac-roman encoding, as it is unlikely that this encoding is the
    one actually expected.
    '''
    try:
        locale.CODESET
    except AttributeError:
        # Fall back to parsing environment variables :-(
        return locale.getdefaultlocale()[1]

    oldloc = locale.setlocale(locale.LC_CTYPE)
    locale.setlocale(locale.LC_CTYPE, "")
    result = locale.nl_langinfo(locale.CODESET)
    locale.setlocale(locale.LC_CTYPE, oldloc)

    return result

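# Map problematic values returned by locale.getpreferredencoding() to
# callables that produce a corrected encoding name (see the mac-roman
# workaround in _getpreferredencoding above).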
_encodingfixers = {
    '646': lambda: 'ascii',
    'ANSI_X3.4-1968': lambda: 'ascii',
    'mac-roman': _getpreferredencoding
}

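# Determine the local encoding: HGENCODING overrides locale detection, and
# if the locale cannot be determined at all we fall back to ascii.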
try:
    encoding = os.environ.get("HGENCODING")
    if not encoding:
        encoding = locale.getpreferredencoding() or 'ascii'
        encoding = _encodingfixers.get(encoding, lambda: encoding)()
except locale.Error:
    encoding = 'ascii'
encodingmode = os.environ.get("HGENCODINGMODE", "strict")
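# Encoding assumed for repository data written before locale support was
# implemented (see tolocal below).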
fallbackencoding = 'ISO-8859-1'

def tolocal(s):
    """
    Convert a string from internal UTF-8 to local encoding

    All internal strings should be UTF-8, but some repos created before
    the implementation of locale support may contain latin1 or possibly
    other character sets. We attempt to decode everything strictly
    using UTF-8, then latin1, and failing that, we use UTF-8 and
    replace unknown characters.
    """
    for e in ('UTF-8', fallbackencoding):
        try:
            u = s.decode(e) # attempt strict decoding
            return u.encode(encoding, "replace")
        except LookupError, k:
            raise error.Abort("%s, please check your locale settings" % k)
        except UnicodeDecodeError:
            pass
    u = s.decode("utf-8", "replace") # last ditch
    return u.encode(encoding, "replace")
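# Illustrative behaviour (hypothetical inputs, assuming encoding == 'UTF-8'):
#   tolocal('caf\xc3\xa9')  ->  'caf\xc3\xa9'  (already valid UTF-8)
#   tolocal('caf\xe9')      ->  'caf\xc3\xa9'  (latin1 fallback, re-encoded)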

def fromlocal(s):
    """
    Convert a string from the local character encoding to UTF-8

    We attempt to decode strings using the encoding mode set by
    HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
    characters will cause an error message. Other modes include
    'replace', which replaces unknown characters with a special
    Unicode character, and 'ignore', which drops the character.
    """
    try:
        return s.decode(encoding, encodingmode).encode("utf-8")
    except UnicodeDecodeError, inst:
        sub = s[max(0, inst.start - 10):inst.start + 10]
        raise error.Abort("decoding near '%s': %s!" % (sub, inst))
    except LookupError, k:
        raise error.Abort("%s, please check your locale settings" % k)
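# Illustrative behaviour (hypothetical input, assuming encoding == 'latin1'):
#   fromlocal('caf\xe9')  ->  'caf\xc3\xa9'
# With HGENCODINGMODE=strict (the default), input that cannot be decoded in
# the local encoding aborts with a "decoding near ..." message.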

def colwidth(s):
    "Find the column width of a string in the local encoding for display"
    d = s.decode(encoding, 'replace')
    if hasattr(unicodedata, 'east_asian_width'):
        w = unicodedata.east_asian_width
        return sum([w(c) in 'WFA' and 2 or 1 for c in d])
    return len(d)
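# Illustrative behaviour (hypothetical inputs):
#   colwidth('abc')  ->  3
#   East Asian wide, fullwidth, and ambiguous-width characters each count as
#   two columns, so a string of three such characters has a width of 6.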