Mercurial > hg
annotate hgext/largefiles/lfutil.py @ 15227:a7686abf73a6
largefiles: factor out lfutil.getminsize()
author | Greg Ward <greg@gerg.ca> |
---|---|
date | Tue, 11 Oct 2011 21:11:01 -0400 |
parents | 2223ea21c98f |
children | ee625de3541e |
rev | line source |
---|---|
15168 | 1 # Copyright 2009-2010 Gregory P. Ward |
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated | |
3 # Copyright 2010-2011 Fog Creek Software | |
4 # Copyright 2010-2011 Unity Technologies | |
5 # | |
6 # This software may be used and distributed according to the terms of the | |
7 # GNU General Public License version 2 or any later version. | |
8 | |
9 '''largefiles utility code: must not import other modules in this package.''' | |
10 | |
11 import os | |
12 import errno | |
13 import shutil | |
14 import stat | |
15 import hashlib | |
16 | |
15226
2223ea21c98f
largefiles: cleanup import, now that we can assume > 1.9 for bundled extension
Na'Tosha Bard <natosha@unity3d.com>
parents:
15224
diff
changeset
|
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil |
15168 | 18 from mercurial.i18n import _ |
19 | |
20 shortname = '.hglf' | |
21 longname = 'largefiles' | |
22 | |
23 | |
24 # -- Portability wrappers ---------------------------------------------- | |
25 | |
15224
7c604d8c7e83
largefiles: remove pre-1.9 code from extension first bundled with 1.9
Na'Tosha Bard <natosha@unity3d.com>
parents:
15206
diff
changeset
|
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the given dirstate with matcher, optionally listing unknown
    and/or ignored files as requested.'''
    return dirstate.walk(matcher, [], unknown, ignored)
15168 | 28 |
def repo_add(repo, list):
    '''Schedule the given files for addition in the working context.'''
    wctx = repo[None]
    return wctx.add(list)
32 | |
def repo_remove(repo, list, unlink=False):
    '''Forget the given files in the working context.

    When unlink is true, also delete them from the working directory;
    files that are already missing are silently skipped.  The whole
    operation runs under the working-directory lock.  Returns None.
    '''
    # Fixes: Py3-incompatible "except OSError, inst" syntax, and an
    # unnecessary inner closure that only forwarded its arguments.
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError as inst:
                    # tolerate files that are already gone
                    if inst.errno != errno.ENOENT:
                        raise
        repo[None].forget(list)
    finally:
        wlock.release()
48 | |
def repo_forget(repo, list):
    '''Forget the given files in the working context (keep them on disk).'''
    wctx = repo[None]
    return wctx.forget(list)
52 | |
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present locally but missing from remote.'''
    # local import: mercurial.discovery is only needed here
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(
        repo, remote, force=force)
    return repo.changelog.findmissing(common)
15168 | 58 |
59 # -- Private worker functions ------------------------------------------ | |
60 | |
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size above which files are treated as largefiles.

    The command-line value *opt* wins; otherwise, when *assumelfiles* is
    set, fall back to the "[largefiles] size" config entry (or *default*).
    Aborts when the value is not an integer or no size can be determined.
    '''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'size', default=default)
    if lfsize:
        try:
            lfsize = int(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be an integer, was %s\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
def link(src, dest):
    '''Hardlink src to dest, degrading to a plain copy (preserving the
    file mode) when hardlinking fails.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink not possible (cross-device, unsupported FS, ...):
        # fall back on a copy with the same permission bits
        shutil.copyfile(src, dest)
        mode = os.stat(src).st_mode
        os.chmod(dest, mode)
def systemcachepath(ui, hash):
    '''Return the path of hash inside the per-user (system-wide) cache.

    Honors the "[largefiles] systemcache" config when set; otherwise
    picks an OS-specific per-user location.  Aborts on unknown OSes.
    '''
    path = ui.config(longname, 'systemcache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA') or os.getenv('APPDATA')
        return os.path.join(appdata, longname, hash)
    if os.name == 'posix':
        return os.path.join(os.getenv('HOME'), '.' + longname, hash)
    raise util.Abort(_('Unknown operating system: %s\n') % os.name)
def insystemcache(ui, hash):
    '''True if hash is present in the per-user (system-wide) cache.'''
    return os.path.exists(systemcachepath(ui, hash))
def findfile(repo, hash):
    '''Return a path to a cached copy of the largefile with the given
    hash, preferring the repo-local cache over the system cache, or
    None when neither cache has it.'''
    if incache(repo, hash):
        repo.ui.note(_('Found %s in cache\n') % hash)
        return cachepath(repo, hash)
    if insystemcache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        return systemcachepath(repo.ui, hash)
    return None
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every filename to the
    unix-style ('/'-separated) form used as the lfdirstate key format.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks big files: i.e. its root is the
    repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    # Fixes: Py3-incompatible "except IOError, err" syntax.
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    if util.safehasattr(repo.dirstate, '_validate'):
        lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                         repo.dirstate._validate)
    else:
        lfdirstate = largefiles_dirstate(opener, ui, repo.root)

    # If the largefiles dirstate does not exist, populate and create it. This
    # ensures that we create it on the first meaningful largefiles operation in
    # a new clone. It also gives us an easy way to forcibly rebuild largefiles
    # state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise

        lfdirstate.write()

    return lfdirstate
def lfdirstate_status(lfdirstate, repo, rev):
    '''Compute largefile status against revision rev, resolving "unsure"
    entries by comparing standin hashes with on-disk content hashes.

    Returns the usual 7-tuple:
    (modified, added, removed, missing, unknown, ignored, clean).
    '''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
def listlfiles(repo, rev=None, matcher=None):
    '''list largefiles in the working copy or specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
def incache(repo, hash):
    '''True if hash exists in the repo-local largefiles cache.'''
    return os.path.exists(cachepath(repo, hash))
def createdir(dir):
    '''Create directory *dir* (and any missing parents) if it does not
    already exist.'''
    # The original exists()-then-makedirs() pattern races with other
    # processes; attempt the creation and tolerate EEXIST instead.
    try:
        os.makedirs(dir)
    except OSError as inst:
        if inst.errno != errno.EEXIST:
            raise
def cachepath(repo, hash):
    '''Return the repo-local cache path for the given hash.'''
    return repo.join(os.path.join(longname, hash))
def copyfromcache(repo, hash, filename):
    '''Copy the largefile with the given hash from the repo or system
    cache to filename in the working tree.

    Returns False (rather than raising) when the file is in neither
    cache; callers are expected to have ensured it is cached already.
    '''
    path = findfile(repo, hash)
    if path is None:
        return False
    target = repo.wjoin(filename)
    util.makedirs(os.path.dirname(target))
    shutil.copy(path, target)
    return True
def copytocache(repo, rev, file, uploaded=False):
    '''Copy the largefile behind the standin for file into the repo
    cache, unless it is already cached.  (rev and uploaded are accepted
    for interface compatibility but not used here.)'''
    hash = readstandin(repo, file)
    if not incache(repo, hash):
        copytocacheabsolute(repo, repo.wjoin(file), hash)
def copytocacheabsolute(repo, file, hash):
    '''Store file (an absolute path) in the repo cache under hash.

    Hardlinks from the system cache when the file is already there;
    otherwise copies it in and mirrors it out to the system cache.
    '''
    target = cachepath(repo, hash)
    createdir(os.path.dirname(target))
    if insystemcache(repo.ui, hash):
        link(systemcachepath(repo.ui, hash), target)
    else:
        shutil.copyfile(file, target)
        os.chmod(target, os.stat(file).st_mode)
        linktosystemcache(repo, hash)
def linktosystemcache(repo, hash):
    '''Mirror the repo-cached largefile for hash into the per-user cache.'''
    syspath = systemcachepath(repo.ui, hash)
    createdir(os.path.dirname(syspath))
    link(cachepath(repo, hash), syspath)
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # Fixed: mutable default arguments ([], {}) replaced with None
    # sentinels; behavior for all existing callers is unchanged.
    pats = pats or []
    opts = opts or {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false, neuter
    the match object\'s bad() method so it does not print any warnings
    about missing files or directories.'''
    # Fixed: mutable default arguments ([], {}) replaced with None
    # sentinels; behavior for all existing callers is unchanged.
    pats = pats or []
    opts = opts or {}
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        match.bad = lambda f, msg: None
    return match
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the files
    accepted by rmatcher, seeded with rmatcher's own file list.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn

    def composed_matchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composed_matchfn
    return smatcher
def standin(filename):
    '''Return the repo-relative path to the standin for the specified
    big file.

    Notes:
    1) Most callers want an absolute path, but _create_standin() needs
       it repo-relative so lfadd() can pass it to repo_add().  So leave
       it up to the caller to use repo.wjoin() to get an absolute path.
    2) Join with '/' because that's what dirstate always uses, even on
       Windows.  Change existing separator to '/' first in case we are
       passed filenames from an external source (like the command line).
    '''
    return shortname + '/' + filename.replace(os.sep, '/')
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial\'s internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
def splitstandin(filename):
    '''Strip the standin prefix from filename and return the remainder,
    or None when filename is not a standin.

    Split on '/' because that is what dirstate always uses, even on
    Windows; the local separator is normalized first in case the name
    came from an external source (like the command line).
    '''
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
def updatestandin(repo, standin):
    '''Rewrite standin from the current content and mode of its
    largefile, if that file exists in the working directory.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
def writestandin(repo, standin, hash, executable):
    '''Write hash to <repo.root>/<standin>, marking the standin
    executable as requested.'''
    writehash(hash, repo.wjoin(standin), executable)
def copyandhash(instream, outfile):
    '''Copy bytes from instream (an iterable) to outfile while computing
    the SHA-1 of the data; closes outfile and returns the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
def hashrepofile(repo, file):
    '''Return the content hash of the working copy of file in repo.'''
    return hashfile(repo.wjoin(file))
def hashfile(file):
    '''Return the hex SHA-1 digest of the contents of file, or the
    empty string when file does not exist.'''
    if not os.path.exists(file):
        return ''
    # hashlib.sha1 is used directly for consistency with hexsha1();
    # try/finally ensures the descriptor is closed even if a read fails
    # (the original leaked it on error).
    hasher = hashlib.sha1()
    fd = open(file, 'rb')
    try:
        while True:
            data = fd.read(128 * 1024)
            if not data:
                break
            hasher.update(data)
    finally:
        fd.close()
    return hasher.hexdigest()
class limitreader(object):
    '''File-like wrapper exposing at most *limit* bytes of *f*.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''No-op: the underlying file is left for the caller to close.'''
        pass
def blockstream(infile, blocksize=128 * 1024):
    """Generator yielding chunks of at most blocksize bytes read from
    infile; closes infile once it is exhausted."""
    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            break
        yield chunk
    # Same blecch as in copyandhash: we close a file we did not open,
    # but callers rely on it.
    infile.close()
def readhash(filename):
    '''Read and return the 40-character hex hash stored in filename.

    Raises util.Abort when the file holds fewer than 40 bytes.
    '''
    # try/finally ensures the handle is closed even when read() raises
    # (the original leaked it on error).
    rfile = open(filename, 'rb')
    try:
        hash = rfile.read(40)
    finally:
        rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and replacing any existing file, then set the
    file mode according to executable.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
def getexecutable(filename):
    '''Return True iff filename is executable by user, group and other.

    (Previously returned a raw bitmask int; callers -- e.g. writehash()
    via getmode() -- only ever test truthiness, so returning a real
    bool is a compatible cleanup.)
    '''
    mode = os.stat(filename).st_mode
    required = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & required) == required
def getmode(executable):
    '''Return the permission mode for a largefile: 0o755 when it should
    be executable, 0o644 otherwise.'''
    # 0o-prefixed octal literals (Python 2.6+/3) replace the Py2-only
    # 0755/0644 spelling, which is a syntax error on Python 3.
    if executable:
        return 0o755
    else:
        return 0o644
def urljoin(first, second, *arg):
    '''Join URL components, ensuring exactly one '/' between each pair.'''
    def _glue(left, right):
        if not left.endswith('/'):
            left = left + '/'
        return left + (right[1:] if right.startswith('/') else right)

    url = first
    for piece in (second,) + arg:
        url = _glue(url, piece)
    return url
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the
    file-like object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
def httpsendfile(ui, filename):
    '''Return an httpconnection.httpsendfile opened on filename in
    binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
# Convert a path to a unix style path. This is used to give a
# canonical path to the lfdirstate.
def unixpath(path):
    '''Return path normalized to a canonical, '/'-separated form.'''
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
def islfilesrepo(repo):
    '''True when repo declares the largefiles requirement and its store
    actually contains at least one standin file.'''
    if 'largefiles' not in repo.requirements:
        return False
    return any_(shortname + '/' in f[0] for f in repo.store.datafiles())
def any_(gen):
    '''Return True if any element of the iterable gen is truthy.

    (Presumably a local stand-in for the any() builtin, kept for
    old-Python compatibility -- confirm before replacing.)
    '''
    for item in gen:
        if item:
            return True
    return False
class storeprotonotcapable(BaseException):
    '''Raised when a store supports none of the requested store types.

    NOTE(review): derives from BaseException rather than Exception --
    confirm whether callers rely on it escaping blanket
    "except Exception" handlers before changing the base class.
    '''
    def __init__(self, storetypes):
        # the store types that would have been acceptable
        self.storetypes = storetypes