45 pointer, |
43 pointer, |
46 ) |
44 ) |
47 |
45 |
48 eh = exthelper.exthelper() |
46 eh = exthelper.exthelper() |
49 |
47 |
|
48 |
@eh.wrapfunction(localrepo, 'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    """Advertise the LFS repo feature when the 'lfs' requirement is present."""
    lfs_required = b'lfs' in requirements
    if lfs_required:
        features.add(repository.REPO_FEATURE_LFS)
    return orig(requirements=requirements, features=features, **kwargs)
|
55 |
56 |
56 |
@eh.wrapfunction(changegroup, 'allsupportedversions')
def allsupportedversions(orig, ui):
    """Add changegroup version '03' to the set reported by the wrapped hook."""
    supported = orig(ui)
    supported.add('03')
    return supported
|
62 |
62 |
63 |
63 @eh.wrapfunction(wireprotov1server, '_capabilities') |
64 @eh.wrapfunction(wireprotov1server, '_capabilities') |
64 def _capabilities(orig, repo, proto): |
65 def _capabilities(orig, repo, proto): |
65 '''Wrap server command to announce lfs server capability''' |
66 '''Wrap server command to announce lfs server capability''' |
66 caps = orig(repo, proto) |
67 caps = orig(repo, proto) |
97 |
100 |
98 # pack hg filelog metadata |
101 # pack hg filelog metadata |
99 hgmeta = {} |
102 hgmeta = {} |
100 for k in p.keys(): |
103 for k in p.keys(): |
101 if k.startswith('x-hg-'): |
104 if k.startswith('x-hg-'): |
102 name = k[len('x-hg-'):] |
105 name = k[len('x-hg-') :] |
103 hgmeta[name] = p[k] |
106 hgmeta[name] = p[k] |
104 if hgmeta or text.startswith('\1\n'): |
107 if hgmeta or text.startswith('\1\n'): |
105 text = storageutil.packmeta(hgmeta, text) |
108 text = storageutil.packmeta(hgmeta, text) |
106 |
109 |
107 return (text, True, {}) |
110 return (text, True, {}) |
|
111 |
108 |
112 |
109 def writetostore(self, text, sidedata): |
113 def writetostore(self, text, sidedata): |
110 # hg filelog metadata (includes rename, etc) |
114 # hg filelog metadata (includes rename, etc) |
111 hgmeta, offset = storageutil.parsemeta(text) |
115 hgmeta, offset = storageutil.parsemeta(text) |
112 if offset and offset > 0: |
116 if offset and offset > 0: |
133 for k, v in hgmeta.iteritems(): |
137 for k, v in hgmeta.iteritems(): |
134 metadata['x-hg-%s' % k] = v |
138 metadata['x-hg-%s' % k] = v |
135 |
139 |
136 rawtext = metadata.serialize() |
140 rawtext = metadata.serialize() |
137 return (rawtext, False) |
141 return (rawtext, False) |
|
142 |
138 |
143 |
139 def _islfs(rlog, node=None, rev=None): |
144 def _islfs(rlog, node=None, rev=None): |
140 if rev is None: |
145 if rev is None: |
141 if node is None: |
146 if node is None: |
142 # both None - likely working copy content where node is not ready |
147 # both None - likely working copy content where node is not ready |
147 if node == nullid: |
152 if node == nullid: |
148 return False |
153 return False |
149 flags = rlog._revlog.flags(rev) |
154 flags = rlog._revlog.flags(rev) |
150 return bool(flags & revlog.REVIDX_EXTSTORED) |
155 return bool(flags & revlog.REVIDX_EXTSTORED) |
151 |
156 |
|
157 |
152 # Wrapping may also be applied by remotefilelog |
158 # Wrapping may also be applied by remotefilelog |
153 def filelogaddrevision(orig, self, text, transaction, link, p1, p2, |
159 def filelogaddrevision( |
154 cachedelta=None, node=None, |
160 orig, |
155 flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds): |
161 self, |
|
162 text, |
|
163 transaction, |
|
164 link, |
|
165 p1, |
|
166 p2, |
|
167 cachedelta=None, |
|
168 node=None, |
|
169 flags=revlog.REVIDX_DEFAULT_FLAGS, |
|
170 **kwds |
|
171 ): |
156 # The matcher isn't available if reposetup() wasn't called. |
172 # The matcher isn't available if reposetup() wasn't called. |
157 lfstrack = self._revlog.opener.options.get('lfstrack') |
173 lfstrack = self._revlog.opener.options.get('lfstrack') |
158 |
174 |
159 if lfstrack: |
175 if lfstrack: |
160 textlen = len(text) |
176 textlen = len(text) |
164 textlen -= offset |
180 textlen -= offset |
165 |
181 |
166 if lfstrack(self._revlog.filename, textlen): |
182 if lfstrack(self._revlog.filename, textlen): |
167 flags |= revlog.REVIDX_EXTSTORED |
183 flags |= revlog.REVIDX_EXTSTORED |
168 |
184 |
169 return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, |
185 return orig( |
170 node=node, flags=flags, **kwds) |
186 self, |
|
187 text, |
|
188 transaction, |
|
189 link, |
|
190 p1, |
|
191 p2, |
|
192 cachedelta=cachedelta, |
|
193 node=node, |
|
194 flags=flags, |
|
195 **kwds |
|
196 ) |
|
197 |
171 |
198 |
172 # Wrapping may also be applied by remotefilelog |
199 # Wrapping may also be applied by remotefilelog |
173 def filelogrenamed(orig, self, node): |
200 def filelogrenamed(orig, self, node): |
174 if _islfs(self, node): |
201 if _islfs(self, node): |
175 rawtext = self._revlog.rawdata(node) |
202 rawtext = self._revlog.rawdata(node) |
180 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev']) |
207 return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev']) |
181 else: |
208 else: |
182 return False |
209 return False |
183 return orig(self, node) |
210 return orig(self, node) |
184 |
211 |
|
212 |
# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
    """Answer the size question from LFS pointer metadata when possible."""
    if not _islfs(self, rev=rev):
        return orig(self, rev)
    # fast path: the pointer already records the blob size, so there is no
    # need to resolve the full file contents
    raw = self._revlog.rawdata(rev)
    return int(pointer.deserialize(raw)['size'])
|
221 |
193 |
222 |
194 @eh.wrapfunction(context.basefilectx, 'cmp') |
223 @eh.wrapfunction(context.basefilectx, 'cmp') |
195 def filectxcmp(orig, self, fctx): |
224 def filectxcmp(orig, self, fctx): |
196 """returns True if text is different than fctx""" |
225 """returns True if text is different than fctx""" |
197 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
226 # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
200 p1 = pointer.deserialize(self.rawdata()) |
229 p1 = pointer.deserialize(self.rawdata()) |
201 p2 = pointer.deserialize(fctx.rawdata()) |
230 p2 = pointer.deserialize(fctx.rawdata()) |
202 return p1.oid() != p2.oid() |
231 return p1.oid() != p2.oid() |
203 return orig(self, fctx) |
232 return orig(self, fctx) |
204 |
233 |
|
234 |
@eh.wrapfunction(context.basefilectx, 'isbinary')
def filectxisbinary(orig, self):
    """Answer isbinary from LFS pointer metadata when the file is LFS-stored."""
    if not self.islfs():
        return orig(self)
    # fast path: consult the pointer instead of fetching the blob; when the
    # pointer carries no hint, assume binary by default
    meta = pointer.deserialize(self.rawdata())
    return bool(int(meta.get('x-is-binary', 1)))
213 |
243 |
|
244 |
def filectxislfs(self):
    """Return True if this file revision is stored as an LFS pointer."""
    flog = self.filelog()
    return _islfs(flog, self.filenode())
|
247 |
216 |
248 |
@eh.wrapfunction(cmdutil, '_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    """Expose the raw file data (the LFS pointer, if any) to the formatter."""
    orig(fm, ctx, matcher, path, decode)
    raw = ctx[path].rawdata()
    fm.data(rawdata=raw)
221 |
253 |
|
254 |
222 @eh.wrapfunction(scmutil, 'wrapconvertsink') |
255 @eh.wrapfunction(scmutil, 'wrapconvertsink') |
223 def convertsink(orig, sink): |
256 def convertsink(orig, sink): |
224 sink = orig(sink) |
257 sink = orig(sink) |
225 if sink.repotype == 'hg': |
258 if sink.repotype == 'hg': |
|
259 |
226 class lfssink(sink.__class__): |
260 class lfssink(sink.__class__): |
227 def putcommit(self, files, copies, parents, commit, source, revmap, |
261 def putcommit( |
228 full, cleanp2): |
262 self, |
|
263 files, |
|
264 copies, |
|
265 parents, |
|
266 commit, |
|
267 source, |
|
268 revmap, |
|
269 full, |
|
270 cleanp2, |
|
271 ): |
229 pc = super(lfssink, self).putcommit |
272 pc = super(lfssink, self).putcommit |
230 node = pc(files, copies, parents, commit, source, revmap, full, |
273 node = pc( |
231 cleanp2) |
274 files, |
|
275 copies, |
|
276 parents, |
|
277 commit, |
|
278 source, |
|
279 revmap, |
|
280 full, |
|
281 cleanp2, |
|
282 ) |
232 |
283 |
233 if 'lfs' not in self.repo.requirements: |
284 if 'lfs' not in self.repo.requirements: |
234 ctx = self.repo[node] |
285 ctx = self.repo[node] |
235 |
286 |
236 # The file list may contain removed files, so check for |
287 # The file list may contain removed files, so check for |
258 # blobstore attributes are not always ready at this time. |
310 # blobstore attributes are not always ready at this time. |
259 for name in ['lfslocalblobstore', 'lfsremoteblobstore']: |
311 for name in ['lfslocalblobstore', 'lfsremoteblobstore']: |
260 if util.safehasattr(othervfs, name): |
312 if util.safehasattr(othervfs, name): |
261 setattr(self, name, getattr(othervfs, name)) |
313 setattr(self, name, getattr(othervfs, name)) |
262 |
314 |
|
315 |
263 def _prefetchfiles(repo, revs, match): |
316 def _prefetchfiles(repo, revs, match): |
264 """Ensure that required LFS blobs are present, fetching them as a group if |
317 """Ensure that required LFS blobs are present, fetching them as a group if |
265 needed.""" |
318 needed.""" |
266 if not util.safehasattr(repo.svfs, 'lfslocalblobstore'): |
319 if not util.safehasattr(repo.svfs, 'lfslocalblobstore'): |
267 return |
320 return |
282 if pointers: |
335 if pointers: |
283 # Recalculating the repo store here allows 'paths.default' that is set |
336 # Recalculating the repo store here allows 'paths.default' that is set |
284 # on the repo by a clone command to be used for the update. |
337 # on the repo by a clone command to be used for the update. |
285 blobstore.remote(repo).readbatch(pointers, localstore) |
338 blobstore.remote(repo).readbatch(pointers, localstore) |
286 |
339 |
|
340 |
def _canskipupload(repo):
    """Return True when uploading LFS blobs would be a no-op for this repo."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return True

    # a null remote store makes every upload a no-op
    remote = repo.svfs.lfsremoteblobstore
    return isinstance(remote, blobstore._nullremote)
294 |
348 |
|
349 |
def candownload(repo):
    """Return True when LFS blobs can actually be fetched for this repo."""
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        return False

    # downloads from a null remote store would yield nothing
    remote = repo.svfs.lfsremoteblobstore
    return not isinstance(remote, blobstore._nullremote)
|
357 |
302 |
358 |
303 def uploadblobsfromrevs(repo, revs): |
359 def uploadblobsfromrevs(repo, revs): |
304 '''upload lfs blobs introduced by revs |
360 '''upload lfs blobs introduced by revs |
305 |
361 |
306 Note: also used by other extensions e. g. infinitepush. avoid renaming. |
362 Note: also used by other extensions e. g. infinitepush. avoid renaming. |
308 if _canskipupload(repo): |
364 if _canskipupload(repo): |
309 return |
365 return |
310 pointers = extractpointers(repo, revs) |
366 pointers = extractpointers(repo, revs) |
311 uploadblobs(repo, pointers) |
367 uploadblobs(repo, pointers) |
312 |
368 |
|
369 |
def prepush(pushop):
    """Prepush hook.

    Scan the outgoing revisions for filelog entries that deserialize into
    LFS metadata, so the push can be blocked until those blobs have been
    uploaded to the remote blobstore.
    """
    missing = pushop.outgoing.missing
    return uploadblobsfromrevs(pushop.repo, missing)
|
378 |
321 |
379 |
322 @eh.wrapfunction(exchange, 'push') |
380 @eh.wrapfunction(exchange, 'push') |
323 def push(orig, repo, remote, *args, **kwargs): |
381 def push(orig, repo, remote, *args, **kwargs): |
324 """bail on push if the extension isn't enabled on remote when needed, and |
382 """bail on push if the extension isn't enabled on remote when needed, and |
325 update the remote store based on the destination path.""" |
383 update the remote store based on the destination path.""" |
329 # this repo use lfs, and the remote repo needs the extension loaded. |
387 # this repo use lfs, and the remote repo needs the extension loaded. |
330 if not remote.local() and not remote.capable('lfs'): |
388 if not remote.local() and not remote.capable('lfs'): |
331 # This is a copy of the message in exchange.push() when requirements |
389 # This is a copy of the message in exchange.push() when requirements |
332 # are missing between local repos. |
390 # are missing between local repos. |
333 m = _("required features are not supported in the destination: %s") |
391 m = _("required features are not supported in the destination: %s") |
334 raise error.Abort(m % 'lfs', |
392 raise error.Abort( |
335 hint=_('enable the lfs extension on the server')) |
393 m % 'lfs', hint=_('enable the lfs extension on the server') |
|
394 ) |
336 |
395 |
337 # Repositories where this extension is disabled won't have the field. |
396 # Repositories where this extension is disabled won't have the field. |
338 # But if there's a requirement, then the extension must be loaded AND |
397 # But if there's a requirement, then the extension must be loaded AND |
339 # there may be blobs to push. |
398 # there may be blobs to push. |
340 remotestore = repo.svfs.lfsremoteblobstore |
399 remotestore = repo.svfs.lfsremoteblobstore |
344 finally: |
403 finally: |
345 repo.svfs.lfsremoteblobstore = remotestore |
404 repo.svfs.lfsremoteblobstore = remotestore |
346 else: |
405 else: |
347 return orig(repo, remote, *args, **kwargs) |
406 return orig(repo, remote, *args, **kwargs) |
348 |
407 |
|
408 |
# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, 'writenewbundle')
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """Push LFS blobs referenced by the outgoing revisions before bundling."""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )
|
419 |
357 |
420 |
358 def extractpointers(repo, revs): |
421 def extractpointers(repo, revs): |
359 """return a list of lfs pointers added by given revs""" |
422 """return a list of lfs pointers added by given revs""" |
360 repo.ui.debug('lfs: computing set of blobs to upload\n') |
423 repo.ui.debug('lfs: computing set of blobs to upload\n') |
361 pointers = {} |
424 pointers = {} |
394 p = pointer.deserialize(fctx.rawdata()) |
458 p = pointer.deserialize(fctx.rawdata()) |
395 if ctx == _ctx: |
459 if ctx == _ctx: |
396 return p |
460 return p |
397 return {} |
461 return {} |
398 except pointer.InvalidPointer as ex: |
462 except pointer.InvalidPointer as ex: |
399 raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n') |
463 raise error.Abort( |
400 % (f, short(_ctx.node()), ex)) |
464 _('lfs: corrupted pointer (%s@%s): %s\n') |
|
465 % (f, short(_ctx.node()), ex) |
|
466 ) |
|
467 |
401 |
468 |
402 def pointersfromctx(ctx, removed=False): |
469 def pointersfromctx(ctx, removed=False): |
403 """return a dict {path: pointer} for given single changectx. |
470 """return a dict {path: pointer} for given single changectx. |
404 |
471 |
405 If ``removed`` == True and the LFS file was removed from ``ctx``, the value |
472 If ``removed`` == True and the LFS file was removed from ``ctx``, the value |
415 p = pointerfromctx(ctx, f, removed=removed) |
482 p = pointerfromctx(ctx, f, removed=removed) |
416 if p is not None: |
483 if p is not None: |
417 result[f] = p |
484 result[f] = p |
418 return result |
485 return result |
419 |
486 |
|
487 |
def uploadblobs(repo, pointers):
    """Upload the given pointers from the local blobstore to the remote one.

    A no-op when ``pointers`` is empty.
    """
    if not pointers:
        return
    store = repo.svfs.lfsremoteblobstore
    store.writebatch(pointers, repo.svfs.lfslocalblobstore)
427 |
495 |
|
496 |
@eh.wrapfunction(upgrade, '_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """Hard-link LFS blobs from the old store into the upgraded repo's store."""
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if not (util.safehasattr(srcrepo.svfs, 'lfslocalblobstore')
            and util.safehasattr(dstrepo.svfs, 'lfslocalblobstore')):
        return

    srcvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstvfs = dstrepo.svfs.lfslocalblobstore.vfs
    for dirpath, dirs, files in srcvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srcvfs.join(oid), dstvfs.join(oid))
511 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) |
|
512 |
442 |
513 |
443 @eh.wrapfunction(upgrade, 'preservedrequirements') |
514 @eh.wrapfunction(upgrade, 'preservedrequirements') |
444 @eh.wrapfunction(upgrade, 'supporteddestrequirements') |
515 @eh.wrapfunction(upgrade, 'supporteddestrequirements') |
445 def upgraderequirements(orig, repo): |
516 def upgraderequirements(orig, repo): |
446 reqs = orig(repo) |
517 reqs = orig(repo) |