35 shortnameslash = shortname + '/' |
35 shortnameslash = shortname + '/' |
36 longname = 'largefiles' |
36 longname = 'largefiles' |
37 |
37 |
38 # -- Private worker functions ------------------------------------------ |
38 # -- Private worker functions ------------------------------------------ |
39 |
39 |
|
40 |
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size threshold, in megabytes.

    Prefer the explicitly supplied opt; when largefiles are assumed and
    no value was given, fall back to the configured minsize (or default).
    Abort if the value is not numeric or no value can be determined.
    '''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
|
55 |
53 |
56 |
54 def link(src, dest): |
57 def link(src, dest): |
55 """Try to create hardlink - if that fails, efficiently make a copy.""" |
58 """Try to create hardlink - if that fails, efficiently make a copy.""" |
56 util.makedirs(os.path.dirname(dest)) |
59 util.makedirs(os.path.dirname(dest)) |
57 try: |
60 try: |
61 with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf: |
64 with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf: |
62 for chunk in util.filechunkiter(srcf): |
65 for chunk in util.filechunkiter(srcf): |
63 dstf.write(chunk) |
66 dstf.write(chunk) |
64 os.chmod(dest, os.stat(src).st_mode) |
67 os.chmod(dest, os.stat(src).st_mode) |
65 |
68 |
|
69 |
def usercachepath(ui, hash):
    '''Return the path in the "global" (user-wide) largefiles cache for a
    file with the given hash.

    The user cache lets multiple repositories share largefiles, saving
    both download bandwidth and disk space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
72 |
76 |
|
77 |
73 def _usercachedir(ui, name=longname): |
78 def _usercachedir(ui, name=longname): |
74 '''Return the location of the "global" largefiles cache.''' |
79 '''Return the location of the "global" largefiles cache.''' |
75 path = ui.configpath(name, 'usercache') |
80 path = ui.configpath(name, 'usercache') |
76 if path: |
81 if path: |
77 return path |
82 return path |
78 if pycompat.iswindows: |
83 if pycompat.iswindows: |
79 appdata = encoding.environ.get('LOCALAPPDATA', |
84 appdata = encoding.environ.get( |
80 encoding.environ.get('APPDATA')) |
85 'LOCALAPPDATA', encoding.environ.get('APPDATA') |
|
86 ) |
81 if appdata: |
87 if appdata: |
82 return os.path.join(appdata, name) |
88 return os.path.join(appdata, name) |
83 elif pycompat.isdarwin: |
89 elif pycompat.isdarwin: |
84 home = encoding.environ.get('HOME') |
90 home = encoding.environ.get('HOME') |
85 if home: |
91 if home: |
90 return os.path.join(path, name) |
96 return os.path.join(path, name) |
91 home = encoding.environ.get('HOME') |
97 home = encoding.environ.get('HOME') |
92 if home: |
98 if home: |
93 return os.path.join(home, '.cache', name) |
99 return os.path.join(home, '.cache', name) |
94 else: |
100 else: |
95 raise error.Abort(_('unknown operating system: %s\n') |
101 raise error.Abort(_('unknown operating system: %s\n') % pycompat.osname) |
96 % pycompat.osname) |
|
97 raise error.Abort(_('unknown %s usercache location') % name) |
102 raise error.Abort(_('unknown %s usercache location') % name) |
|
103 |
98 |
104 |
def inusercache(ui, hash):
    '''Report whether the user cache holds a largefile with this hash.'''
    return os.path.exists(usercachepath(ui, hash))
|
108 |
102 |
109 |
103 def findfile(repo, hash): |
110 def findfile(repo, hash): |
104 '''Return store path of the largefile with the specified hash. |
111 '''Return store path of the largefile with the specified hash. |
105 As a side effect, the file might be linked from user cache. |
112 As a side effect, the file might be linked from user cache. |
106 Return None if the file can't be found locally.''' |
113 Return None if the file can't be found locally.''' |
113 path = storepath(repo, hash) |
120 path = storepath(repo, hash) |
114 link(usercachepath(repo.ui, hash), path) |
121 link(usercachepath(repo.ui, hash), path) |
115 return path |
122 return path |
116 return None |
123 return None |
117 |
124 |
|
125 |
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for tracking largefiles.

    Every path passed in is normalized with unixpath() before being
    handed to the base dirstate, so entries are always stored in
    '/'-separated form regardless of the platform separator.
    '''

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # a largefile is never treated as ignored
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
|
156 |
140 |
157 |
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate tracking the largefiles of repo: rooted at the
    repo root, but persisted under .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    storedir = longname
    opener = vfsmod.vfs(vfs.join(storedir))
    lfds = largefilesdirstate(opener, ui, repo.root,
                              repo.dirstate._validate,
                              lambda: sparse.matcher(repo))

    # When the largefiles dirstate file is absent (e.g. a fresh clone),
    # seed it from the standins currently tracked so the first meaningful
    # largefiles operation starts from a populated state.
    if not create or vfs.exists(vfs.join(storedir, 'dirstate')):
        return lfds

    matcher = getstandinmatcher(repo)
    standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                  ignored=False)

    if len(standins) > 0:
        vfs.makedirs(storedir)

    for standinfile in standins:
        lfile = splitstandin(standinfile)
        lfds.normallookup(lfile)
    return lfds
168 |
190 |
|
191 |
169 def lfdirstatestatus(lfdirstate, repo): |
192 def lfdirstatestatus(lfdirstate, repo): |
170 pctx = repo['.'] |
193 pctx = repo['.'] |
171 match = matchmod.always() |
194 match = matchmod.always() |
172 unsure, s = lfdirstate.status(match, subrepos=[], ignored=False, |
195 unsure, s = lfdirstate.status( |
173 clean=False, unknown=False) |
196 match, subrepos=[], ignored=False, clean=False, unknown=False |
|
197 ) |
174 modified, clean = s.modified, s.clean |
198 modified, clean = s.modified, s.clean |
175 for lfile in unsure: |
199 for lfile in unsure: |
176 try: |
200 try: |
177 fctx = pctx[standin(lfile)] |
201 fctx = pctx[standin(lfile)] |
178 except LookupError: |
202 except LookupError: |
182 else: |
206 else: |
183 clean.append(lfile) |
207 clean.append(lfile) |
184 lfdirstate.normal(lfile) |
208 lfdirstate.normal(lfile) |
185 return s |
209 return s |
186 |
210 |
|
211 |
def listlfiles(repo, rev=None, matcher=None):
    '''Return the largefiles present in the working copy or in the
    specified changeset, as a list of repo-relative names.'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == '?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
|
225 |
198 |
226 |
def instore(repo, hash, forcelocal=False):
    '''Tell whether the store (only the local one if forcelocal) holds a
    largefile with the given hash.'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
|
230 |
202 |
231 |
def storepath(repo, hash, forcelocal=False):
    '''Return where the repository largefiles store keeps a file with the
    given hash; a shared repo uses the shared store unless forcelocal.'''
    if forcelocal or not repo.shared():
        return repo.vfs.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
|
238 |
209 |
239 |
210 def findstorepath(repo, hash): |
240 def findstorepath(repo, hash): |
211 '''Search through the local store path(s) to find the file for the given |
241 '''Search through the local store path(s) to find the file for the given |
212 hash. If the file is not found, its path in the primary store is returned. |
242 hash. If the file is not found, its path in the primary store is returned. |
213 The return value is a tuple of (path, exists(path)). |
243 The return value is a tuple of (path, exists(path)). |
236 return False |
267 return False |
237 wvfs.makedirs(wvfs.dirname(wvfs.join(filename))) |
268 wvfs.makedirs(wvfs.dirname(wvfs.join(filename))) |
238 # The write may fail before the file is fully written, but we |
269 # The write may fail before the file is fully written, but we |
239 # don't use atomic writes in the working copy. |
270 # don't use atomic writes in the working copy. |
240 with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd: |
271 with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd: |
241 gothash = copyandhash( |
272 gothash = copyandhash(util.filechunkiter(srcfd), destfd) |
242 util.filechunkiter(srcfd), destfd) |
|
243 if gothash != hash: |
273 if gothash != hash: |
244 repo.ui.warn(_('%s: data corruption in %s with hash %s\n') |
274 repo.ui.warn( |
245 % (filename, path, gothash)) |
275 _('%s: data corruption in %s with hash %s\n') |
|
276 % (filename, path, gothash) |
|
277 ) |
246 wvfs.unlink(filename) |
278 wvfs.unlink(filename) |
247 return False |
279 return False |
248 return True |
280 return True |
|
281 |
249 |
282 |
def copytostore(repo, ctx, file, fstandin):
    '''Copy the largefile `file` into the store under the hash recorded in
    its standin fstandin, unless the store already has it.  Warn when the
    working copy does not provide the file.'''
    wvfs = repo.wvfs
    lfhash = readasstandin(ctx[fstandin])
    if instore(repo, lfhash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), lfhash)
        return
    repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                 (file, lfhash))
|
295 |
260 |
296 |
def copyalltostore(repo, node):
    '''Copy every largefile touched by the given revision into the store.'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is None:
            continue
        if filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
|
305 |
269 |
306 |
def copytostoreabsolute(repo, file, hash):
    '''Place the file at absolute path `file` into the store under `hash`.

    Link from the user cache when the blob is already there; otherwise
    copy the contents into the store atomically, then publish the result
    to the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
        return
    dstpath = storepath(repo, hash)
    util.makedirs(os.path.dirname(dstpath))
    with open(file, 'rb') as srcf, util.atomictempfile(
            dstpath, createmode=repo.store.createmode) as dstf:
        for chunk in util.filechunkiter(srcf):
            dstf.write(chunk)
    linktousercache(repo, hash)
|
319 |
281 |
320 |
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    into the user cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
|
326 |
287 |
327 |
288 def getstandinmatcher(repo, rmatcher=None): |
328 def getstandinmatcher(repo, rmatcher=None): |
289 '''Return a match object that applies rmatcher to the standin directory''' |
329 '''Return a match object that applies rmatcher to the standin directory''' |
290 wvfs = repo.wvfs |
330 wvfs = repo.wvfs |
291 standindir = shortname |
331 standindir = shortname |
301 else: |
341 else: |
302 # no patterns: relative to repo root |
342 # no patterns: relative to repo root |
303 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn) |
343 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn) |
304 return match |
344 return match |
305 |
345 |
|
346 |
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts only standins whose corresponding
    files rmatcher accepts; the file list rmatcher carries is reported
    as the user-specified paths.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatch = smatcher.matchfn

    def combinedmatch(f):
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = combinedmatch

    return smatcher
|
360 |
317 |
361 |
def standin(filename):
    '''Return the repo-relative path of the standin for the named big
    file.'''
    # dirstate paths always use '/', even on Windows; normalize any
    # externally supplied separators before adding the standin prefix.
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
329 |
373 |
|
374 |
def isstandin(filename):
    '''True if filename is a big file standin.  filename must be in
    Mercurial's internal (slash-separated) form.'''
    prefix = shortnameslash
    return filename[:len(prefix)] == prefix
|
379 |
334 |
380 |
335 def splitstandin(filename): |
381 def splitstandin(filename): |
336 # Split on / because that's what dirstate always uses, even on Windows. |
382 # Split on / because that's what dirstate always uses, even on Windows. |
337 # Change local separator to / first just in case we are passed filenames |
383 # Change local separator to / first just in case we are passed filenames |
338 # from an external source (like the command line). |
384 # from an external source (like the command line). |
353 executable = getexecutable(file) |
400 executable = getexecutable(file) |
354 writestandin(repo, standin, hash, executable) |
401 writestandin(repo, standin, hash, executable) |
355 else: |
402 else: |
356 raise error.Abort(_('%s: file not found!') % lfile) |
403 raise error.Abort(_('%s: file not found!') % lfile) |
357 |
404 |
|
405 |
def readasstandin(fctx):
    '''Extract the hex hash recorded in the given standin filectx.

    This encapsulates how "standin" data is stored into storage layer.'''
    data = fctx.data()
    return data.strip()
|
411 |
363 |
412 |
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
|
416 |
367 |
417 |
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # hashlib.sha1 may not be seeded with a str on Python 3; start empty.
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
376 |
426 |
|
427 |
def hashfile(file):
    '''Return the hex SHA-1 of file's contents, or the empty string when
    file does not exist.'''
    if os.path.exists(file):
        with open(file, 'rb') as fd:
            return hexsha1(fd)
    return ''
382 |
433 |
|
434 |
def getexecutable(filename):
    '''Return True if filename has the executable bit set for user, group,
    and other.'''
    mode = os.stat(filename).st_mode
    # require all three bits, matching the original chained '&' tests,
    # but return a proper bool instead of a raw bit-mask int
    exebits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return mode & exebits == exebits
|
442 |
388 |
443 |
389 def urljoin(first, second, *arg): |
444 def urljoin(first, second, *arg): |
390 def join(left, right): |
445 def join(left, right): |
391 if not left.endswith('/'): |
446 if not left.endswith('/'): |
392 left += '/' |
447 left += '/' |
397 url = join(first, second) |
452 url = join(first, second) |
398 for a in arg: |
453 for a in arg: |
399 url = join(url, a) |
454 url = join(url, a) |
400 return url |
455 return url |
401 |
456 |
|
457 |
def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 digest of the data readable from the
    given file-like object."""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        hasher.update(chunk)
    return hex(hasher.digest())
409 |
465 |
|
466 |
def httpsendfile(ui, filename):
    '''Return an httpconnection.httpsendfile wrapping filename opened in
    binary ('rb') mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
|
469 |
412 |
470 |
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
416 |
474 |
|
475 |
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if 'largefiles' in repo.requirements:
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    # no stored largefiles: fall back to checking the lfdirstate
    return any(openlfdirstate(repo.ui, repo, False))
|
484 |
424 |
485 |
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be handled.'''

    def __init__(self, storetypes):
        # the store types involved in the failed capability check
        self.storetypes = storetypes
|
489 |
428 |
490 |
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for all tracked standins;
    hash is None when a standin cannot be read.'''
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    states = []
    for standinfile in repo.dirstate.walk(matcher, subrepos=[],
                                          unknown=False, ignored=False):
        lfile = splitstandin(standinfile)
        try:
            lfhash = readasstandin(wctx[standinfile])
        except IOError:
            lfhash = None
        states.append((lfile, lfhash))
    return states
|
505 |
442 |
506 |
443 def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
507 def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
444 lfstandin = standin(lfile) |
508 lfstandin = standin(lfile) |
445 if lfstandin in repo.dirstate: |
509 if lfstandin in repo.dirstate: |
446 stat = repo.dirstate._map[lfstandin] |
510 stat = repo.dirstate._map[lfstandin] |
447 state, mtime = stat[0], stat[3] |
511 state, mtime = stat[0], stat[3] |
448 else: |
512 else: |
449 state, mtime = '?', -1 |
513 state, mtime = '?', -1 |
450 if state == 'n': |
514 if state == 'n': |
451 if (normallookup or mtime < 0 or |
515 if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): |
452 not repo.wvfs.exists(lfile)): |
|
453 # state 'n' doesn't ensure 'clean' in this case |
516 # state 'n' doesn't ensure 'clean' in this case |
454 lfdirstate.normallookup(lfile) |
517 lfdirstate.normallookup(lfile) |
455 else: |
518 else: |
456 lfdirstate.normal(lfile) |
519 lfdirstate.normal(lfile) |
457 elif state == 'm': |
520 elif state == 'm': |
490 # lookup while copyalltostore(), but can omit redundant check for |
554 # lookup while copyalltostore(), but can omit redundant check for |
491 # files comming from the 2nd parent, which should exist in store |
555 # files comming from the 2nd parent, which should exist in store |
492 # at merging. |
556 # at merging. |
493 copyalltostore(repo, node) |
557 copyalltostore(repo, node) |
494 |
558 |
|
559 |
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (name, hash) entry differs
    between the two standin lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track seen names in a set: the original scanned the result list for
    # every element, which is O(n^2)
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
502 |
567 |
|
568 |
503 def getlfilestoupload(repo, missing, addfunc): |
569 def getlfilestoupload(repo, missing, addfunc): |
504 makeprogress = repo.ui.makeprogress |
570 makeprogress = repo.ui.makeprogress |
505 with makeprogress(_('finding outgoing largefiles'), |
571 with makeprogress( |
506 unit=_('revisions'), total=len(missing)) as progress: |
572 _('finding outgoing largefiles'), |
|
573 unit=_('revisions'), |
|
574 total=len(missing), |
|
575 ) as progress: |
507 for i, n in enumerate(missing): |
576 for i, n in enumerate(missing): |
508 progress.update(i) |
577 progress.update(i) |
509 parents = [p for p in repo[n].parents() if p != node.nullid] |
578 parents = [p for p in repo[n].parents() if p != node.nullid] |
510 |
579 |
511 oldlfstatus = repo.lfstatus |
580 oldlfstatus = repo.lfstatus |
551 # It can cost a lot of time (several seconds) |
621 # It can cost a lot of time (several seconds) |
552 # otherwise to update all standins if the largefiles are |
622 # otherwise to update all standins if the largefiles are |
553 # large. |
623 # large. |
554 lfdirstate = openlfdirstate(ui, repo) |
624 lfdirstate = openlfdirstate(ui, repo) |
555 dirtymatch = matchmod.always() |
625 dirtymatch = matchmod.always() |
556 unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False, |
626 unsure, s = lfdirstate.status( |
557 clean=False, unknown=False) |
627 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False |
|
628 ) |
558 modifiedfiles = unsure + s.modified + s.added + s.removed |
629 modifiedfiles = unsure + s.modified + s.added + s.removed |
559 lfiles = listlfiles(repo) |
630 lfiles = listlfiles(repo) |
560 # this only loops through largefiles that exist (not |
631 # this only loops through largefiles that exist (not |
561 # removed/renamed) |
632 # removed/renamed) |
562 for lfile in lfiles: |
633 for lfile in lfiles: |
645 |
718 |
646 But the 1st commit of resuming automated committing (e.g. ``rebase |
719 But the 1st commit of resuming automated committing (e.g. ``rebase |
647 --continue``) should update them, because largefiles may be |
720 --continue``) should update them, because largefiles may be |
648 modified manually. |
721 modified manually. |
649 ''' |
722 ''' |
|
723 |
    def __init__(self, resuming):
        # whether an interrupted automated committing is being resumed
        self.resuming = resuming
652 |
726 |
653 def __call__(self, repo, match): |
727 def __call__(self, repo, match): |
654 if self.resuming: |
728 if self.resuming: |
655 self.resuming = False # avoids updating at subsequent commits |
729 self.resuming = False # avoids updating at subsequent commits |
656 return updatestandinsbymatch(repo, match) |
730 return updatestandinsbymatch(repo, match) |
657 else: |
731 else: |
658 return match |
732 return match |
|
733 |
659 |
734 |
660 def getstatuswriter(ui, repo, forcibly=None): |
735 def getstatuswriter(ui, repo, forcibly=None): |
661 '''Return the function to write largefiles specific status out |
736 '''Return the function to write largefiles specific status out |
662 |
737 |
663 If ``forcibly`` is ``None``, this returns the last element of |
738 If ``forcibly`` is ``None``, this returns the last element of |