view mercurial/urllibcompat.py @ 37562:e5cd8d1a094d
lfs: special case the null:// usercache instead of treating it as a url
The previous code worked on Windows but not on Unix, and a pending patch's test
failed. The URL being used was something like "/tmp/.../client1/null://",
courtesy of ui.configpath(). Looking at the doc comment, this may not be the
right function to call (why should a relative cache path be expanded relative
to the repo root or config file?), but largefiles has been using it since
8b8dd13295db (Oct 2011). It was introduced in 1b591f9b7fd2 (Jan 2011) without
comment or callers, and a grep over the whole history shows that only
largefiles used it until lfs and infinitepush came along recently.
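For illustration, a minimal sketch of the mangling on Unix (the base directory is hypothetical; this is not the lfs or ui.configpath() code itself). A value like `null://` is not an absolute path, so a helper that expands relative config paths happily joins it onto a directory:

```python
import os.path

# Hypothetical cache base directory standing in for the per-test path above.
base = '/tmp/hgtests/client1'
value = 'null://'

# 'null://' is not an absolute path, so a naive "expand relative config
# paths" helper joins it onto the base directory...
print(os.path.isabs(value))        # False
print(os.path.join(base, value))   # /tmp/hgtests/client1/null://
```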
It looks like both Linux and Windows are happy if the
`if not os.path.isabs(v) or "://" not in v` check in configpath() is changed to
an 'and' (see the sketch below). I'm guessing the "://" test is there to pick
off URLs, so that seems reasonable, but I'm not sure why it isn't explicitly
"file://", and I thought "file://foo" was relative anyway. (At least, there are
doctests for file:///tmp in util.url.) There is no mention of this setting in
the help, but it is referenced on the wiki page for largefiles, which never
says it is intended to be a URL, and whose example uses an absolute path.
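A rough sketch of the proposed tweak, assuming the surrounding logic simply joins the value onto a base directory when the guard fires (`expand()` and its arguments are made up for illustration; this is not the real configpath()):

```python
import os.path

def expand(v, base, use_and=False):
    """Illustrative stand-in for the expansion guard discussed above."""
    if use_and:
        # Proposed variant: only expand values that are relative *and*
        # do not look like URLs.
        needsexpand = not os.path.isabs(v) and "://" not in v
    else:
        # Current variant: expands anything that is relative *or* lacks
        # "://" -- which catches "null://" because it is not absolute.
        needsexpand = not os.path.isabs(v) or "://" not in v
    return os.path.join(base, v) if needsexpand else v

print(expand('null://', '/tmp/client1'))                  # /tmp/client1/null://
print(expand('null://', '/tmp/client1', use_and=True))    # null://
print(expand('cache/dir', '/tmp/client1', use_and=True))  # /tmp/client1/cache/dir
```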
I don't want this blocking the rest of the lfs server discovery work. It was
also wrong to allow a file:// URL here but not in largefiles.
| author | Matt Harbison <matt_harbison@yahoo.com> |
| --- | --- |
| date | Wed, 11 Apr 2018 17:29:55 -0400 |
| parents | 5bc7ff103081 |
| children | 5774fc623a18 |
line source
# urllibcompat.py - adapters to ease using urllib2 on Py2 and urllib on Py3
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import pycompat

_sysstr = pycompat.sysstr

class _pycompatstub(object):
    def __init__(self):
        self._aliases = {}

    def _registeraliases(self, origin, items):
        """Add items that will be populated at the first access"""
        items = map(_sysstr, items)
        self._aliases.update(
            (item.replace(r'_', r'').lower(), (origin, item))
            for item in items)

    def _registeralias(self, origin, attr, name):
        """Alias ``origin``.``attr`` as ``name``"""
        self._aliases[_sysstr(name)] = (origin, _sysstr(attr))

    def __getattr__(self, name):
        try:
            origin, item = self._aliases[name]
        except KeyError:
            raise AttributeError(name)
        self.__dict__[name] = obj = getattr(origin, item)
        return obj

httpserver = _pycompatstub()
urlreq = _pycompatstub()
urlerr = _pycompatstub()

if pycompat.ispy3:
    import urllib.parse
    urlreq._registeraliases(urllib.parse, (
        "splitattr",
        "splitpasswd",
        "splitport",
        "splituser",
        "urlparse",
        "urlunparse",
    ))
    urlreq._registeralias(urllib.parse, "parse_qs", "parseqs")
    urlreq._registeralias(urllib.parse, "parse_qsl", "parseqsl")
    urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote")
    import urllib.request
    urlreq._registeraliases(urllib.request, (
        "AbstractHTTPHandler",
        "BaseHandler",
        "build_opener",
        "FileHandler",
        "FTPHandler",
        "ftpwrapper",
        "HTTPHandler",
        "HTTPSHandler",
        "install_opener",
        "pathname2url",
        "HTTPBasicAuthHandler",
        "HTTPDigestAuthHandler",
        "HTTPPasswordMgrWithDefaultRealm",
        "ProxyHandler",
        "Request",
        "url2pathname",
        "urlopen",
    ))
    import urllib.response
    urlreq._registeraliases(urllib.response, (
        "addclosehook",
        "addinfourl",
    ))
    import urllib.error
    urlerr._registeraliases(urllib.error, (
        "HTTPError",
        "URLError",
    ))
    import http.server
    httpserver._registeraliases(http.server, (
        "HTTPServer",
        "BaseHTTPRequestHandler",
        "SimpleHTTPRequestHandler",
        "CGIHTTPRequestHandler",
    ))

    # urllib.parse.quote() accepts both str and bytes, decodes bytes
    # (if necessary), and returns str. This is wonky. We provide a custom
    # implementation that only accepts bytes and emits bytes.
    def quote(s, safe=r'/'):
        s = urllib.parse.quote_from_bytes(s, safe=safe)
        return s.encode('ascii', 'strict')

    # urllib.parse.urlencode() returns str. We use this function to make
    # sure we return bytes.
    def urlencode(query, doseq=False):
        s = urllib.parse.urlencode(query, doseq=doseq)
        return s.encode('ascii')

    urlreq.quote = quote
    urlreq.urlencode = urlencode

    def getfullurl(req):
        return req.full_url

    def gethost(req):
        return req.host

    def getselector(req):
        return req.selector

    def getdata(req):
        return req.data

    def hasdata(req):
        return req.data is not None
else:
    import BaseHTTPServer
    import CGIHTTPServer
    import SimpleHTTPServer
    import urllib2
    import urllib
    import urlparse
    urlreq._registeraliases(urllib, (
        "addclosehook",
        "addinfourl",
        "ftpwrapper",
        "pathname2url",
        "quote",
        "splitattr",
        "splitpasswd",
        "splitport",
        "splituser",
        "unquote",
        "url2pathname",
        "urlencode",
    ))
    urlreq._registeraliases(urllib2, (
        "AbstractHTTPHandler",
        "BaseHandler",
        "build_opener",
        "FileHandler",
        "FTPHandler",
        "HTTPBasicAuthHandler",
        "HTTPDigestAuthHandler",
        "HTTPHandler",
        "HTTPPasswordMgrWithDefaultRealm",
        "HTTPSHandler",
        "install_opener",
        "ProxyHandler",
        "Request",
        "urlopen",
    ))
    urlreq._registeraliases(urlparse, (
        "urlparse",
        "urlunparse",
    ))
    urlreq._registeralias(urlparse, "parse_qs", "parseqs")
    urlreq._registeralias(urlparse, "parse_qsl", "parseqsl")
    urlerr._registeraliases(urllib2, (
        "HTTPError",
        "URLError",
    ))
    httpserver._registeraliases(BaseHTTPServer, (
        "HTTPServer",
        "BaseHTTPRequestHandler",
    ))
    httpserver._registeraliases(SimpleHTTPServer, (
        "SimpleHTTPRequestHandler",
    ))
    httpserver._registeraliases(CGIHTTPServer, (
        "CGIHTTPRequestHandler",
    ))

    def gethost(req):
        return req.get_host()

    def getselector(req):
        return req.get_selector()

    def getfullurl(req):
        return req.get_full_url()

    def getdata(req):
        return req.get_data()

    def hasdata(req):
        return req.has_data()
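For reference (not part of the file above), a short usage sketch of the stubs on the Python 3 branch; it assumes this revision of Mercurial is importable, and the expected outputs are illustrative:

```python
# Callers import the stubs instead of urllib/urllib2 directly; assumes a
# Mercurial checkout or install providing this module is on sys.path.
from mercurial.urllibcompat import urlreq, urlerr

# quote()/urlencode() are the byte-safe wrappers defined in the module on
# Python 3: bytes in, bytes out (unlike urllib.parse.quote, which returns str).
print(urlreq.quote(b'sp ace/slash'))       # b'sp%20ace/slash'
print(urlreq.urlencode({b'key': b'a&b'}))  # b'key=a%26b'

# Everything else resolves lazily through _pycompatstub.__getattr__ using
# the lower-cased, underscore-stripped alias names registered above.
opener = urlreq.buildopener()              # urllib.request.build_opener()
print(urlerr.urlerror)                     # <class 'urllib.error.URLError'>
```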