comparison hgext/remotefilelog/basestore.py @ 43076:2372284d9457
formatting: blacken the codebase
This is using my patch to black
(https://github.com/psf/black/pull/826) so we don't un-wrap collection
literals.
Done with:
hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S
# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions
Differential Revision: https://phab.mercurial-scm.org/D6971
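For context, the `-S` flag passed to black above is `--skip-string-normalization`, so existing quote styles in the tree are left alone, and the linked patch keeps already-exploded collection literals from being collapsed back onto one line. A minimal illustrative sketch of that behavior (the variable name below is made up, not taken from this file):

```python
# Hypothetical example, assuming the patched black from psf/black#826.
# An already multi-line collection literal keeps its wrapping:
VALIDATE_MODES = [
    'on',
    'strict',
    'off',
]
# Stock black of that era would collapse it to a single line:
#     VALIDATE_MODES = ['on', 'strict', 'off']
# The patched black leaves the multi-line form as-is, which keeps this
# mass reformat from churning hand-wrapped literals.
```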
| author | Augie Fackler <augie@google.com> |
| --- | --- |
| date | Sun, 06 Oct 2019 09:45:02 -0400 |
| parents | 13dad5cb4b99 |
| children | 687b865b95ad |
comparison
43075:57875cf423c9 | 43076:2372284d9457 |
---|---|
16 ) | 16 ) |
17 from . import ( | 17 from . import ( |
18 constants, | 18 constants, |
19 shallowutil, | 19 shallowutil, |
20 ) | 20 ) |
| 21 |
21 | 22 |
22 class basestore(object): | 23 class basestore(object): |
23 def __init__(self, repo, path, reponame, shared=False): | 24 def __init__(self, repo, path, reponame, shared=False): |
24 """Creates a remotefilelog store object for the given repo name. | 25 """Creates a remotefilelog store object for the given repo name. |
25 | 26 |
35 self._path = path | 36 self._path = path |
36 self._reponame = reponame | 37 self._reponame = reponame |
37 self._shared = shared | 38 self._shared = shared |
38 self._uid = os.getuid() if not pycompat.iswindows else None | 39 self._uid = os.getuid() if not pycompat.iswindows else None |
39 | 40 |
40 self._validatecachelog = self.ui.config("remotefilelog", | 41 self._validatecachelog = self.ui.config( |
41 "validatecachelog") | 42 "remotefilelog", "validatecachelog" |
42 self._validatecache = self.ui.config("remotefilelog", "validatecache", | 43 ) |
43 'on') | 44 self._validatecache = self.ui.config( |
| 45 "remotefilelog", "validatecache", 'on' |
| 46 ) |
44 if self._validatecache not in ('on', 'strict', 'off'): | 47 if self._validatecache not in ('on', 'strict', 'off'): |
45 self._validatecache = 'on' | 48 self._validatecache = 'on' |
46 if self._validatecache == 'off': | 49 if self._validatecache == 'off': |
47 self._validatecache = False | 50 self._validatecache = False |
48 | 51 |
52 def getmissing(self, keys): | 55 def getmissing(self, keys): |
53 missing = [] | 56 missing = [] |
54 for name, node in keys: | 57 for name, node in keys: |
55 filepath = self._getfilepath(name, node) | 58 filepath = self._getfilepath(name, node) |
56 exists = os.path.exists(filepath) | 59 exists = os.path.exists(filepath) |
57 if (exists and self._validatecache == 'strict' and | 60 if ( |
58 not self._validatekey(filepath, 'contains')): | 61 exists |
| 62 and self._validatecache == 'strict' |
| 63 and not self._validatekey(filepath, 'contains') |
| 64 ): |
59 exists = False | 65 exists = False |
60 if not exists: | 66 if not exists: |
61 missing.append((name, node)) | 67 missing.append((name, node)) |
62 | 68 |
63 return missing | 69 return missing |
75 | 81 |
76 def cleanup(self, ledger): | 82 def cleanup(self, ledger): |
77 ui = self.ui | 83 ui = self.ui |
78 entries = ledger.sources.get(self, []) | 84 entries = ledger.sources.get(self, []) |
79 count = 0 | 85 count = 0 |
80 progress = ui.makeprogress(_("cleaning up"), unit="files", | 86 progress = ui.makeprogress( |
81 total=len(entries)) | 87 _("cleaning up"), unit="files", total=len(entries) |
| 88 ) |
82 for entry in entries: | 89 for entry in entries: |
83 if entry.gced or (entry.datarepacked and entry.historyrepacked): | 90 if entry.gced or (entry.datarepacked and entry.historyrepacked): |
84 progress.update(count) | 91 progress.update(count) |
85 path = self._getfilepath(entry.filename, entry.node) | 92 path = self._getfilepath(entry.filename, entry.node) |
86 util.tryunlink(path) | 93 util.tryunlink(path) |
176 missingfilename.discard(sha) | 183 missingfilename.discard(sha) |
177 | 184 |
178 return filenames | 185 return filenames |
179 | 186 |
180 def _getrepocachepath(self): | 187 def _getrepocachepath(self): |
181 return os.path.join( | 188 return ( |
182 self._path, self._reponame) if self._shared else self._path | 189 os.path.join(self._path, self._reponame) |
| 190 if self._shared |
| 191 else self._path |
| 192 ) |
183 | 193 |
184 def _listkeys(self): | 194 def _listkeys(self): |
185 """List all the remotefilelog keys that exist in the store. | 195 """List all the remotefilelog keys that exist in the store. |
186 | 196 |
187 Returns a iterator of (filename hash, filecontent hash) tuples. | 197 Returns a iterator of (filename hash, filecontent hash) tuples. |
217 with open(self._validatecachelog, 'a+') as f: | 227 with open(self._validatecachelog, 'a+') as f: |
218 f.write("corrupt %s during read\n" % filepath) | 228 f.write("corrupt %s during read\n" % filepath) |
219 os.rename(filepath, filepath + ".corrupt") | 229 os.rename(filepath, filepath + ".corrupt") |
220 raise KeyError("corrupt local cache file %s" % filepath) | 230 raise KeyError("corrupt local cache file %s" % filepath) |
221 except IOError: | 231 except IOError: |
222 raise KeyError("no file found at %s for %s:%s" % (filepath, name, | 232 raise KeyError( |
223 hex(node))) | 233 "no file found at %s for %s:%s" % (filepath, name, hex(node)) |
| 234 ) |
224 | 235 |
225 return data | 236 return data |
226 | 237 |
227 def addremotefilelognode(self, name, node, data): | 238 def addremotefilelognode(self, name, node, data): |
228 filepath = self._getfilepath(name, node) | 239 filepath = self._getfilepath(name, node) |
242 shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) | 253 shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) |
243 shallowutil.writefile(filepath, data, readonly=True) | 254 shallowutil.writefile(filepath, data, readonly=True) |
244 | 255 |
245 if self._validatecache: | 256 if self._validatecache: |
246 if not self._validatekey(filepath, 'write'): | 257 if not self._validatekey(filepath, 'write'): |
247 raise error.Abort(_("local cache write was corrupted %s") % | 258 raise error.Abort( |
248 filepath) | 259 _("local cache write was corrupted %s") % filepath |
| 260 ) |
249 finally: | 261 finally: |
250 os.umask(oldumask) | 262 os.umask(oldumask) |
251 | 263 |
252 def markrepo(self, path): | 264 def markrepo(self, path): |
253 """Call this to add the given repo path to the store's list of | 265 """Call this to add the given repo path to the store's list of |
286 # it is truncated | 298 # it is truncated |
287 return False | 299 return False |
288 | 300 |
289 # extract the node from the metadata | 301 # extract the node from the metadata |
290 offset += size | 302 offset += size |
291 datanode = data[offset:offset + 20] | 303 datanode = data[offset : offset + 20] |
292 | 304 |
293 # and compare against the path | 305 # and compare against the path |
294 if os.path.basename(path) == hex(datanode): | 306 if os.path.basename(path) == hex(datanode): |
295 # Content matches the intended path | 307 # Content matches the intended path |
296 return True | 308 return True |
312 removed = 0 | 324 removed = 0 |
313 | 325 |
314 # keep files newer than a day even if they aren't needed | 326 # keep files newer than a day even if they aren't needed |
315 limit = time.time() - (60 * 60 * 24) | 327 limit = time.time() - (60 * 60 * 24) |
316 | 328 |
317 progress = ui.makeprogress(_("removing unnecessary files"), | 329 progress = ui.makeprogress( |
318 unit="files") | 330 _("removing unnecessary files"), unit="files" |
| 331 ) |
319 progress.update(0) | 332 progress.update(0) |
320 for root, dirs, files in os.walk(cachepath): | 333 for root, dirs, files in os.walk(cachepath): |
321 for file in files: | 334 for file in files: |
322 if file == 'repos': | 335 if file == 'repos': |
323 continue | 336 continue |
350 shallowutil.unlinkfile(path) | 363 shallowutil.unlinkfile(path) |
351 except OSError as e: | 364 except OSError as e: |
352 # errno.ENOENT = no such file or directory | 365 # errno.ENOENT = no such file or directory |
353 if e.errno != errno.ENOENT: | 366 if e.errno != errno.ENOENT: |
354 raise | 367 raise |
355 msg = _("warning: file %s was removed by another " | 368 msg = _( |
356 "process\n") | 369 "warning: file %s was removed by another " |
| 370 "process\n" |
| 371 ) |
357 ui.warn(msg % path) | 372 ui.warn(msg % path) |
358 continue | 373 continue |
359 removed += 1 | 374 removed += 1 |
360 progress.complete() | 375 progress.complete() |
361 | 376 |
362 # remove oldest files until under limit | 377 # remove oldest files until under limit |
363 limit = ui.configbytes("remotefilelog", "cachelimit") | 378 limit = ui.configbytes("remotefilelog", "cachelimit") |
364 if size > limit: | 379 if size > limit: |
365 excess = size - limit | 380 excess = size - limit |
366 progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes", | 381 progress = ui.makeprogress( |
367 total=excess) | 382 _("enforcing cache limit"), unit="bytes", total=excess |
| 383 ) |
368 removedexcess = 0 | 384 removedexcess = 0 |
369 while queue and size > limit and size > 0: | 385 while queue and size > limit and size > 0: |
370 progress.update(removedexcess) | 386 progress.update(removedexcess) |
371 atime, oldpath, oldpathstat = queue.get() | 387 atime, oldpath, oldpathstat = queue.get() |
372 try: | 388 try: |
380 size -= oldpathstat.st_size | 396 size -= oldpathstat.st_size |
381 removed += 1 | 397 removed += 1 |
382 removedexcess += oldpathstat.st_size | 398 removedexcess += oldpathstat.st_size |
383 progress.complete() | 399 progress.complete() |
384 | 400 |
385 ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n") | 401 ui.status( |
386 % (removed, count, | 402 _("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n") |
387 float(originalsize) / 1024.0 / 1024.0 / 1024.0, | 403 % ( |
388 float(size) / 1024.0 / 1024.0 / 1024.0)) | 404 removed, |
| 405 count, |
| 406 float(originalsize) / 1024.0 / 1024.0 / 1024.0, |
| 407 float(size) / 1024.0 / 1024.0 / 1024.0, |
| 408 ) |
| 409 ) |
| 410 |
389 | 411 |
390 class baseunionstore(object): | 412 class baseunionstore(object): |
391 def __init__(self, *args, **kwargs): | 413 def __init__(self, *args, **kwargs): |
392 # If one of the functions that iterates all of the stores is about to | 414 # If one of the functions that iterates all of the stores is about to |
393 # throw a KeyError, try this many times with a full refresh between | 415 # throw a KeyError, try this many times with a full refresh between |
405 | 427 |
406 @staticmethod | 428 @staticmethod |
407 def retriable(fn): | 429 def retriable(fn): |
408 def noop(*args): | 430 def noop(*args): |
409 pass | 431 pass |
| 432 |
410 def wrapped(self, *args, **kwargs): | 433 def wrapped(self, *args, **kwargs): |
411 retrylog = self.retrylog or noop | 434 retrylog = self.retrylog or noop |
412 funcname = fn.__name__ | 435 funcname = fn.__name__ |
413 i = 0 | 436 i = 0 |
414 while i < self.numattempts: | 437 while i < self.numattempts: |
419 try: | 442 try: |
420 return fn(self, *args, **kwargs) | 443 return fn(self, *args, **kwargs) |
421 except KeyError: | 444 except KeyError: |
422 if i == self.numattempts: | 445 if i == self.numattempts: |
423 # retries exhausted | 446 # retries exhausted |
424 retrylog('retries exhausted in %s, raising KeyError\n' % | 447 retrylog( |
425 pycompat.sysbytes(funcname)) | 448 'retries exhausted in %s, raising KeyError\n' |
| 449 % pycompat.sysbytes(funcname) |
| 450 ) |
426 raise | 451 raise |
| 452 |
427 return wrapped | 453 return wrapped |