@@ -12,11 +12,10 @@
 
 from __future__ import absolute_import
 
 import array
 import errno
-import time
 
 from .node import (
     bin,
     hex,
     nullid,
@@ -342,11 +341,11 @@
     if not len(repo.file('.hgtags')):
         # No tags have ever been committed, so we can avoid a
         # potentially expensive search.
         return ([], {}, valid, None, True)
 
-    starttime = time.time()
+    starttime = util.timer()
 
     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads. Worst case: no cache
     # file, so newheads == repoheads.
@@ -357,11 +356,11 @@
         if fnode != nullid:
             cachefnode[head] = fnode
 
     fnodescache.write()
 
-    duration = time.time() - starttime
+    duration = util.timer() - starttime
     ui.log('tagscache',
            '%d/%d cache hits/lookups in %0.4f '
            'seconds\n',
            fnodescache.hitcount, fnodescache.lookupcount, duration)
 
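A note on the substance of the change: time.time() reports wall-clock time, which can jump forwards or backwards if the system clock is adjusted, so it is a weak basis for measuring elapsed intervals such as the .hgtags filenode lookup timed above. The sketch below is not Mercurial's actual util.py code, only a hedged illustration of the idea behind a util.timer-style helper: prefer the best interval clock available and fall back to time.time() only as a last resort.

    import sys
    import time

    # Hypothetical stand-in for a util.timer-style helper (illustration only,
    # not the real mercurial.util implementation): pick a clock suited to
    # measuring elapsed time rather than plain wall-clock time.time().
    if hasattr(time, 'perf_counter'):
        timer = time.perf_counter   # Python 3.3+: monotonic, high resolution
    elif sys.platform == 'win32':
        timer = time.clock          # higher resolution than time.time() on old Windows Pythons
    else:
        timer = time.time           # wall clock; last-resort fallback

    starttime = timer()
    # ... expensive work, e.g. the filenode lookups timed in the hunk above ...
    duration = timer() - starttime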