changeset 37564:31a4ea773369

lfs: infer the blob store URL from an explicit push dest or default-push

Unlike pull, the blobs are uploaded within the exchange.push() window, so simply wrap it and swap in a properly configured remote store. The '_subtoppath' field shouldn't be available during this window, but give the passed path priority for clarity.

At one point I hit an AttributeError in one of the convert tests when trying to save the original remote blobstore when the swap was run unconditionally. I wrapped it in a util.safehasattr(), but then today I wasn't able to reproduce it. But now the whole thing is tucked under the requirement guard because without the requirement, there are no blobs in the repo, even if the extension is loaded.
author Matt Harbison <matt_harbison@yahoo.com>
date Sun, 08 Apr 2018 14:22:12 -0400
parents be1cc65bdb1c
children 9c7a25ef5b49
files hgext/lfs/__init__.py hgext/lfs/blobstore.py hgext/lfs/wrapper.py tests/test-lfs-serve.t tests/test-lfs-test-server.t
diffstat 5 files changed, 41 insertions(+), 8 deletions(-) [+]
line wrap: on
line diff
--- a/hgext/lfs/__init__.py	Sun Apr 08 01:23:39 2018 -0400
+++ b/hgext/lfs/__init__.py	Sun Apr 08 14:22:12 2018 -0400
@@ -87,9 +87,9 @@
     #   git-lfs endpoint
     # - file:///tmp/path
     #   local filesystem, usually for testing
-    # if unset, lfs will assume the repository at ``paths.default`` also handles
-    # blob storage for http(s) URLs.  Otherwise, lfs will prompt to set this
-    # when it must use this value.
+    # if unset, lfs will assume the remote repository also handles blob storage
+    # for http(s) URLs.  Otherwise, lfs will prompt to set this when it must
+    # use this value.
     # (default: unset)
     url = https://example.com/repo.git/info/lfs
 
--- a/hgext/lfs/blobstore.py	Sun Apr 08 01:23:39 2018 -0400
+++ b/hgext/lfs/blobstore.py	Sun Apr 08 14:22:12 2018 -0400
@@ -529,7 +529,7 @@
         raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                           hint=_('run hg verify'))
 
-def remote(repo):
+def remote(repo, remote=None):
     """remotestore factory. return a store in _storemap depending on config
 
     If ``lfs.url`` is specified, use that remote endpoint.  Otherwise, try to
@@ -541,7 +541,9 @@
     """
     url = util.url(repo.ui.config('lfs', 'url') or '')
     if url.scheme is None:
-        if util.safehasattr(repo, '_subtoppath'):
+        if remote:
+            defaulturl = util.url(remote)
+        elif util.safehasattr(repo, '_subtoppath'):
             # The pull command sets this during the optional update phase, which
             # tells exactly where the pull originated, whether 'paths.default'
             # or explicit.
--- a/hgext/lfs/wrapper.py	Sun Apr 08 01:23:39 2018 -0400
+++ b/hgext/lfs/wrapper.py	Sun Apr 08 14:22:12 2018 -0400
@@ -289,7 +289,8 @@
     return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
 
 def push(orig, repo, remote, *args, **kwargs):
-    """bail on push if the extension isn't enabled on remote when needed"""
+    """bail on push if the extension isn't enabled on remote when needed, and
+    update the remote store based on the destination path."""
     if 'lfs' in repo.requirements:
         # If the remote peer is for a local repo, the requirement tests in the
         # base class method enforce lfs support.  Otherwise, some revisions in
@@ -300,7 +301,18 @@
             m = _("required features are not supported in the destination: %s")
             raise error.Abort(m % 'lfs',
                               hint=_('enable the lfs extension on the server'))
-    return orig(repo, remote, *args, **kwargs)
+
+        # Repositories where this extension is disabled won't have the field.
+        # But if there's a requirement, then the extension must be loaded AND
+        # there may be blobs to push.
+        remotestore = repo.svfs.lfsremoteblobstore
+        try:
+            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
+            return orig(repo, remote, *args, **kwargs)
+        finally:
+            repo.svfs.lfsremoteblobstore = remotestore
+    else:
+        return orig(repo, remote, *args, **kwargs)
 
 def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                    *args, **kwargs):
--- a/tests/test-lfs-serve.t	Sun Apr 08 01:23:39 2018 -0400
+++ b/tests/test-lfs-serve.t	Sun Apr 08 14:22:12 2018 -0400
@@ -242,7 +242,23 @@
   $ echo 'this is another lfs file' > lfs2.txt
   $ hg ci -Aqm 'lfs file with lfs client'
 
-  $ hg push -q
+  $ hg --config paths.default= push -v http://localhost:$HGPORT
+  pushing to http://localhost:$HGPORT/
+  lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
+  searching for changes
+  remote has heads on branch 'default' that are not known locally: 8374dc4052cb
+  lfs: uploading a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de (25 bytes)
+  lfs: processed: a82f1c5cea0d40e3bb3a849686bb4e6ae47ca27e614de55c1ed0325698ef68de
+  lfs: uploaded 1 files (25 bytes)
+  1 changesets found
+  uncompressed size of bundle content:
+       206 (changelog)
+       172 (manifests)
+       275  lfs2.txt
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
   $ grep 'lfs' .hg/requires $SERVER_REQUIRES
   .hg/requires:lfs
   $TESTTMP/server/.hg/requires:lfs
--- a/tests/test-lfs-test-server.t	Sun Apr 08 01:23:39 2018 -0400
+++ b/tests/test-lfs-test-server.t	Sun Apr 08 14:22:12 2018 -0400
@@ -74,6 +74,7 @@
   http auth: user foo, password ***
   pushing to ../repo2
   http auth: user foo, password ***
+  http auth: user foo, password ***
   query 1; heads
   searching for changes
   1 total queries in *s (glob)
@@ -209,6 +210,7 @@
   http auth: user foo, password ***
   pushing to ../repo1
   http auth: user foo, password ***
+  http auth: user foo, password ***
   query 1; heads
   searching for changes
   all remote heads known locally
@@ -477,6 +479,7 @@
   http auth: user foo, password ***
   pushing to ../repo1
   http auth: user foo, password ***
+  http auth: user foo, password ***
   query 1; heads
   searching for changes
   all remote heads known locally