74 |
74 |
def link(src, dest):
    '''Hardlink src to dest, falling back to a plain copy when
    hardlinks are unavailable.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # Hardlinking failed (e.g. cross-device link or an
        # unsupporting filesystem): degrade to a byte copy and
        # mirror the source file's permission bits onto the copy.
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
82 |
82 |
83 def systemcachepath(ui, hash): |
83 def systemcachepath(ui, hash): |
84 path = ui.config(longname, 'systemcache', None) |
84 path = ui.config(longname, 'systemcache', None) |
120 def forget(self, f): |
120 def forget(self, f): |
121 return super(largefiles_dirstate, self).forget(unixpath(f)) |
121 return super(largefiles_dirstate, self).forget(unixpath(f)) |
122 |
122 |
123 def openlfdirstate(ui, repo): |
123 def openlfdirstate(ui, repo): |
124 ''' |
124 ''' |
125 Return a dirstate object that tracks big files: i.e. its root is the |
125 Return a dirstate object that tracks largefiles: i.e. its root is |
126 repo root, but it is saved in .hg/largefiles/dirstate. |
126 the repo root, but it is saved in .hg/largefiles/dirstate. |
127 ''' |
127 ''' |
128 admin = repo.join(longname) |
128 admin = repo.join(longname) |
129 opener = scmutil.opener(admin) |
129 opener = scmutil.opener(admin) |
130 if util.safehasattr(repo.dirstate, '_validate'): |
130 if util.safehasattr(repo.dirstate, '_validate'): |
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root, |
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root, |
132 repo.dirstate._validate) |
132 repo.dirstate._validate) |
133 else: |
133 else: |
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root) |
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root) |
135 |
135 |
136 # If the largefiles dirstate does not exist, populate and create it. This |
136 # If the largefiles dirstate does not exist, populate and create |
137 # ensures that we create it on the first meaningful largefiles operation in |
137 # it. This ensures that we create it on the first meaningful |
138 # a new clone. It also gives us an easy way to forcibly rebuild largefiles |
138 # largefiles operation in a new clone. It also gives us an easy |
139 # state: |
139 # way to forcibly rebuild largefiles state: |
140 # rm .hg/largefiles/dirstate && hg status |
140 # rm .hg/largefiles/dirstate && hg status |
141 # Or even, if things are really messed up: |
141 # Or even, if things are really messed up: |
142 # rm -rf .hg/largefiles && hg status |
142 # rm -rf .hg/largefiles && hg status |
143 if not os.path.exists(os.path.join(admin, 'dirstate')): |
143 if not os.path.exists(os.path.join(admin, 'dirstate')): |
144 util.makedirs(admin) |
144 util.makedirs(admin) |
175 finally: |
175 finally: |
176 wlock.release() |
176 wlock.release() |
177 return (modified, added, removed, missing, unknown, ignored, clean) |
177 return (modified, added, removed, missing, unknown, ignored, clean) |
178 |
178 |
179 def listlfiles(repo, rev=None, matcher=None): |
179 def listlfiles(repo, rev=None, matcher=None): |
180 '''list largefiles in the working copy or specified changeset''' |
180 '''return a list of largefiles in the working copy or the |
|
181 specified changeset''' |
181 |
182 |
182 if matcher is None: |
183 if matcher is None: |
183 matcher = getstandinmatcher(repo) |
184 matcher = getstandinmatcher(repo) |
184 |
185 |
185 # ignore unknown files in working directory |
186 # ignore unknown files in working directory |
195 |
196 |
def cachepath(repo, hash):
    '''Return the path of the repo-local largefiles cache entry for hash.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
198 |
199 |
199 def copyfromcache(repo, hash, filename): |
200 def copyfromcache(repo, hash, filename): |
200 '''copyfromcache copies the specified largefile from the repo or system |
201 '''Copy the specified largefile from the repo or system cache to |
201 cache to the specified location in the repository. It will not throw an |
202 filename in the repository. Return true on success or false if the |
202 exception on failure, as it is meant to be called only after ensuring that |
203 file was not found in either cache (which should not happened: |
203 the needed largefile exists in the cache.''' |
204 this is meant to be called only after ensuring that the needed |
|
205 largefile exists in the cache).''' |
204 path = findfile(repo, hash) |
206 path = findfile(repo, hash) |
205 if path is None: |
207 if path is None: |
206 return False |
208 return False |
207 util.makedirs(os.path.dirname(repo.wjoin(filename))) |
209 util.makedirs(os.path.dirname(repo.wjoin(filename))) |
208 shutil.copy(path, repo.wjoin(filename)) |
210 shutil.copy(path, repo.wjoin(filename)) |
247 match.matchfn = lambda f: False |
249 match.matchfn = lambda f: False |
248 return match |
250 return match |
249 return getmatcher(repo, pats, opts, showbad=False) |
251 return getmatcher(repo, pats, opts, showbad=False) |
250 |
252 |
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    # Use None sentinels instead of mutable default arguments
    # ([] / {}), which are shared across calls and a classic Python
    # pitfall; callers that pass explicit values are unaffected.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    match = scmutil.match(repo[None], pats, opts)

    if not showbad:
        # Silence warnings about nonexistent files/directories.
        match.bad = lambda f, msg: None
    return match
260 |
262 |
261 def composestandinmatcher(repo, rmatcher): |
263 def composestandinmatcher(repo, rmatcher): |
262 '''Return a matcher that accepts standins corresponding to the files |
264 '''Return a matcher that accepts standins corresponding to the |
263 accepted by rmatcher. Pass the list of files in the matcher as the |
265 files accepted by rmatcher. Pass the list of files in the matcher |
264 paths specified by the user.''' |
266 as the paths specified by the user.''' |
265 smatcher = getstandinmatcher(repo, rmatcher.files()) |
267 smatcher = getstandinmatcher(repo, rmatcher.files()) |
266 isstandin = smatcher.matchfn |
268 isstandin = smatcher.matchfn |
267 def composed_matchfn(f): |
269 def composed_matchfn(f): |
268 return isstandin(f) and rmatcher.matchfn(splitstandin(f)) |
270 return isstandin(f) and rmatcher.matchfn(splitstandin(f)) |
269 smatcher.matchfn = composed_matchfn |
271 smatcher.matchfn = composed_matchfn |
281 # Windows. Change existing separator to '/' first in case we are |
283 # Windows. Change existing separator to '/' first in case we are |
282 # passed filenames from an external source (like the command line). |
284 # passed filenames from an external source (like the command line). |
283 return shortname + '/' + filename.replace(os.sep, '/') |
285 return shortname + '/' + filename.replace(os.sep, '/') |
284 |
286 |
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
289 |
291 |
290 def splitstandin(filename): |
292 def splitstandin(filename): |
291 # Split on / because that's what dirstate always uses, even on Windows. |
293 # Split on / because that's what dirstate always uses, even on Windows. |
292 # Change local separator to / first just in case we are passed filenames |
294 # Change local separator to / first just in case we are passed filenames |
308 '''read hex hash from standin for filename at given node, or working |
310 '''read hex hash from standin for filename at given node, or working |
309 directory if no node is given''' |
311 directory if no node is given''' |
310 return repo[node][standin(filename)].data().strip() |
312 return repo[node][standin(filename)].data().strip() |
311 |
313 |
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    target = repo.wjoin(standin)
    writehash(hash, target, executable)
315 |
317 |
316 def copyandhash(instream, outfile): |
318 def copyandhash(instream, outfile): |
317 '''Read bytes from instream (iterable) and write them to outfile, |
319 '''Read bytes from instream (iterable) and write them to outfile, |
318 computing the SHA-1 hash of the data along the way. Close outfile |
320 computing the SHA-1 hash of the data along the way. Close outfile |
321 for data in instream: |
323 for data in instream: |
322 hasher.update(data) |
324 hasher.update(data) |
323 outfile.write(data) |
325 outfile.write(data) |
324 |
326 |
325 # Blecch: closing a file that somebody else opened is rude and |
327 # Blecch: closing a file that somebody else opened is rude and |
326 # wrong. But it's so darn convenient and practical! After all, |
328 # wrong. But it's so darn convenient and practical! After all, |
327 # outfile was opened just to copy and hash. |
329 # outfile was opened just to copy and hash. |
328 outfile.close() |
330 outfile.close() |
329 |
331 |
330 return hasher.digest() |
332 return hasher.digest() |
331 |
333 |
362 while True: |
364 while True: |
363 data = infile.read(blocksize) |
365 data = infile.read(blocksize) |
364 if not data: |
366 if not data: |
365 break |
367 break |
366 yield data |
368 yield data |
367 # Same blecch as above. |
369 # same blecch as copyandhash() above |
368 infile.close() |
370 infile.close() |
369 |
371 |
370 def readhash(filename): |
372 def readhash(filename): |
371 rfile = open(filename, 'rb') |
373 rfile = open(filename, 'rb') |
372 hash = rfile.read(40) |
374 hash = rfile.read(40) |
423 return h.hexdigest() |
425 return h.hexdigest() |
424 |
426 |
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename opened in binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
427 |
429 |
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # Normalize with the platform rules first, then force '/' as the
    # separator so the lfdirstate always sees one canonical form.
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
432 |
433 |
def islfilesrepo(repo):
    '''Report whether repo has the largefiles requirement set and at
    least one store datafile under the standin prefix.'''
    if 'largefiles' not in repo.requirements:
        return False
    prefix = shortname + '/'
    return any_(prefix in f[0] for f in repo.store.datafiles())