annotate hgext/largefiles/lfutil.py @ 15171:547da6115d1d
largefiles: eliminate naked exceptions

author    Matt Mackall <mpm@selenic.com>
date      Thu, 29 Sep 2011 17:14:47 -0500
parents   c1a4a3220711
children  8e115063950d

# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import os
import errno
import inspect
import shutil
import stat
import hashlib

from mercurial import cmdutil, dirstate, httpconnection, match as match_, \
    url as url_, util
from mercurial.i18n import _

try:
    from mercurial import scmutil
except ImportError:
    pass

shortname = '.hglf'
longname = 'largefiles'


# -- Portability wrappers ----------------------------------------------

if 'subrepos' in inspect.getargspec(dirstate.dirstate.status)[0]:
    # for Mercurial >= 1.5
    def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
        return dirstate.walk(matcher, [], unknown, ignored)
else:
    # for Mercurial <= 1.4
    def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
        return dirstate.walk(matcher, unknown, ignored)

def repo_add(repo, list):
    try:
        # Mercurial <= 1.5
        add = repo.add
    except AttributeError:
        # Mercurial >= 1.6
        add = repo[None].add
    return add(list)

def repo_remove(repo, list, unlink=False):
    try:
        # Mercurial <= 1.5
        remove = repo.remove
    except AttributeError:
        # Mercurial >= 1.6
        try:
            # Mercurial <= 1.8
            remove = repo[None].remove
        except AttributeError:
            # Mercurial >= 1.9
            def remove(list, unlink):
                wlock = repo.wlock()
                try:
                    if unlink:
                        for f in list:
                            try:
                                util.unlinkpath(repo.wjoin(f))
                            except OSError, inst:
                                if inst.errno != errno.ENOENT:
                                    raise
                    repo[None].forget(list)
                finally:
                    wlock.release()

    return remove(list, unlink=unlink)

def repo_forget(repo, list):
    try:
        # Mercurial <= 1.5
        forget = repo.forget
    except AttributeError:
        # Mercurial >= 1.6
        forget = repo[None].forget
    return forget(list)

def findoutgoing(repo, remote, force):
    # First attempt is for Mercurial <= 1.5, second is for >= 1.6
    try:
        return repo.findoutgoing(remote)
    except AttributeError:
        from mercurial import discovery
        try:
            # Mercurial <= 1.8
            return discovery.findoutgoing(repo, remote, force=force)
        except AttributeError:
            # Mercurial >= 1.9
            common, _anyinc, _heads = discovery.findcommonincoming(repo,
                remote, force=force)
            return repo.changelog.findmissing(common)

# -- Private worker functions ------------------------------------------

if os.name == 'nt':
    from mercurial import win32
    linkfn = win32.oslink

def link(src, dest):
    try:
        linkfn(src, dest)
    except OSError:
        # If hardlinks fail, fall back on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)

def systemcachepath(ui, hash):
    path = ui.config(longname, 'systemcache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            path = os.path.join(os.getenv('LOCALAPPDATA') or \
                os.getenv('APPDATA'), longname, hash)
        elif os.name == 'posix':
            path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
        else:
            raise util.Abort(_('Unknown operating system: %s\n') % os.name)
    return path

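# Illustrative note (not part of the original module): by default the system
# cache entry for a hash lands in $HOME/.largefiles/<hash> on POSIX systems
# and in %LOCALAPPDATA%\largefiles\<hash> (or %APPDATA%) on Windows. The
# location can be overridden in an hgrc; the path below is hypothetical:
#
#   [largefiles]
#   systemcache = /srv/hg/largefiles-cache
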
def insystemcache(ui, hash):
    return os.path.exists(systemcachepath(ui, hash))

def findfile(repo, hash):
    if incache(repo, hash):
        repo.ui.note(_('Found %s in cache\n') % hash)
        return cachepath(repo, hash)
    if insystemcache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        return systemcachepath(repo.ui, hash)
    return None

class largefiles_dirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))

def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks big files: i.e. its root is the
    repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    try:
        # Mercurial >= 1.9
        opener = scmutil.opener(admin)
    except ImportError:
        # Mercurial <= 1.8
        opener = util.opener(admin)
    if util.safehasattr(repo.dirstate, '_validate'):
        lfdirstate = largefiles_dirstate(opener, ui, repo.root,
            repo.dirstate._validate)
    else:
        lfdirstate = largefiles_dirstate(opener, ui, repo.root)

    # If the largefiles dirstate does not exist, populate and create it. This
    # ensures that we create it on the first meaningful largefiles operation in
    # a new clone. It also gives us an easy way to forcibly rebuild largefiles
    # state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise

        lfdirstate.write()

    return lfdirstate

def lfdirstate_status(lfdirstate, repo, rev):
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)

def listlfiles(repo, rev=None, matcher=None):
    '''list largefiles in the working copy or specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f) for f in repo[rev].walk(matcher) \
            if rev is not None or repo.dirstate[f] != '?']

def incache(repo, hash):
    return os.path.exists(cachepath(repo, hash))

def createdir(dir):
    if not os.path.exists(dir):
        os.makedirs(dir)

def cachepath(repo, hash):
    return repo.join(os.path.join(longname, hash))

def copyfromcache(repo, hash, filename):
    '''copyfromcache copies the specified largefile from the repo or system
    cache to the specified location in the repository. It will not throw an
    exception on failure, as it is meant to be called only after ensuring that
    the needed largefile exists in the cache.'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(path, repo.wjoin(filename))
    return True

def copytocache(repo, rev, file, uploaded=False):
    hash = readstandin(repo, file)
    if incache(repo, hash):
        return
    copytocacheabsolute(repo, repo.wjoin(file), hash)

def copytocacheabsolute(repo, file, hash):
    createdir(os.path.dirname(cachepath(repo, hash)))
    if insystemcache(repo.ui, hash):
        link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
    else:
        shutil.copyfile(file, cachepath(repo, hash))
        os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
        linktosystemcache(repo, hash)

def linktosystemcache(repo, hash):
    createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
    link(cachepath(repo, hash), systemcachepath(repo.ui, hash))

def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)

def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false, neuter
    the match object\'s bad() method so it does not print any warnings
    about missing files or directories.'''
    try:
        # Mercurial >= 1.9
        match = scmutil.match(repo[None], pats, opts)
    except ImportError:
        # Mercurial <= 1.8
        match = cmdutil.match(repo, pats, opts)

    if not showbad:
        match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the files
    accepted by rmatcher. Pass the list of files in the matcher as the
    paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn

    return smatcher

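# Illustrative note (not part of the original module): given a matcher built
# from the user pattern 'foo.bin', the composed matcher accepts the standin
# '.hglf/foo.bin' -- a path must both look like a standin and, once the
# standin prefix is split off, satisfy the original matcher.
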
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')

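# Illustrative example (not part of the original module): with shortname set
# to '.hglf' above, standin() maps a largefile path to its standin path:
#
#   standin('foo/bar.dat')   -> '.hglf/foo/bar.dat'
#   standin('foo\\bar.dat')  -> '.hglf/foo/bar.dat'   (on Windows, os.sep='\\')
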
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must
    be in Mercurial\'s internal form (slash-separated).'''
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

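# Illustrative example (not part of the original module): splitstandin() is
# the inverse of standin() for paths that really are standins:
#
#   splitstandin('.hglf/foo/bar.dat') -> 'foo/bar.dat'
#   splitstandin('foo/bar.dat')       -> None
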
def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()

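# Minimal usage sketch (not part of the original module): copyandhash() is
# typically fed an iterable of blocks such as blockstream() below, plus an
# already-open destination file; it closes the destination and returns the
# binary SHA-1 digest. 'srcpath' and 'destpath' are hypothetical names.
def _example_copyandhash(srcpath, destpath):
    instream = blockstream(open(srcpath, 'rb'))
    return copyandhash(instream, open(destpath, 'wb'))
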
def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

class limitreader(object):
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = length > self.limit and self.limit or length
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass

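# Minimal usage sketch (not part of the original module): limitreader caps the
# number of bytes that can be read from a wrapped file-like object, which is
# useful when a largefile's exact size is known up front. 'infile' and 'size'
# are hypothetical names.
def _example_readlimited(infile, size):
    # Never returns more than 'size' bytes, however large infile is.
    return limitreader(infile, size).read(size)
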
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        data = infile.read(blocksize)
        if not data:
            break
        yield data
    # Same blecch as above.
    infile.close()

def readhash(filename):
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash

def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
        stat.S_IXOTH)

def getmode(executable):
    if executable:
        return 0755
    else:
        return 0644

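# Illustrative example (not part of the original module): standin files are
# written with a mode that mirrors the largefile's executable bit:
#
#   getmode(True)  -> 0755
#   getmode(False) -> 0644
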
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

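# Illustrative example (not part of the original module): urljoin() collapses
# duplicate slashes at each boundary; the URL below is hypothetical:
#
#   urljoin('http://example.com/', '/repo', 'largefiles')
#       -> 'http://example.com/repo/largefiles'
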
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

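# Illustrative example (not part of the original module): hexsha1() takes an
# open file-like object, not a path; 'somefile' is a hypothetical name:
#
#   hexsha1(open('somefile', 'rb')) -> a 40-character hex digest
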
def httpsendfile(ui, filename):
    try:
        # Mercurial >= 1.9
        return httpconnection.httpsendfile(ui, filename, 'rb')
    except ImportError:
        if 'ui' in inspect.getargspec(url_.httpsendfile.__init__)[0]:
            # Mercurial == 1.8
            return url_.httpsendfile(ui, filename, 'rb')
        else:
            # Mercurial <= 1.7
            return url_.httpsendfile(filename, 'rb')

# Convert a path to a unix style path. This is used to give a
# canonical path to the lfdirstate.
def unixpath(path):
    return os.path.normpath(path).replace(os.sep, '/')

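# Illustrative example (not part of the original module):
#
#   unixpath('foo/./sub/../bar.dat') -> 'foo/bar.dat'
#   unixpath('foo\\bar.dat')         -> 'foo/bar.dat'   (on Windows)
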
def islfilesrepo(repo):
    return ('largefiles' in repo.requirements and
            any_(shortname + '/' in f[0] for f in repo.store.datafiles()))

def any_(gen):
    for x in gen:
        if x:
            return True
    return False

class storeprotonotcapable(BaseException):
    def __init__(self, storetypes):
        self.storetypes = storetypes