mercurial/config.py @ 24735:07200e3332a1
tags: extract .hgtags filenodes cache to a standalone file
Resolution of .hgtags filenode values has historically been a
performance pain point for large repositories, where reading individual
manifests can take over 100ms. Multiplied across hundreds or even
thousands of heads, resolving .hgtags filenodes becomes a significant
performance issue.
This patch establishes a standalone cache file holding the .hgtags
filenodes for each changeset. After this patch, the .hgtags filenode
for any particular changeset should only have to be computed once
during the lifetime of the repository.
The introduced hgtagsfnodes1 cache file is modeled after the rev branch
cache: the cache is effectively an array of entries consisting of a
changeset fragment and the filenode for a revision. The file grows in
proportion to the length of the repository (24 bytes per changeset) and
is truncated when the repository is stripped. The file is not written
unless tag info is requested and the set of tags has changed since the
cache was last written.
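To make the record layout concrete, here is a minimal sketch of how such a fixed-width cache could be packed and probed. It assumes the 24 bytes per changeset break down as a 4-byte changeset-node prefix (used to validate an entry) plus the 20-byte .hgtags filenode; the helper names are illustrative, not Mercurial's actual API.

```python
import struct

# Hypothetical sketch of a fixed-width cache record, assuming 24 bytes
# per changeset: a 4-byte changeset-node prefix (for validating the
# entry) plus the 20-byte .hgtags filenode. Not Mercurial's actual API.
_RECORD = struct.Struct('>4s20s')  # 24 bytes total

def pack_record(changeset_node, hgtags_filenode):
    """Pack one cache entry from two binary SHA-1 nodes."""
    return _RECORD.pack(changeset_node[:4], hgtags_filenode)

def lookup_record(cache_data, rev, changeset_node):
    """Return the cached .hgtags filenode for revision ``rev``, or None.

    Every record has the same size, so the entry for a revision lives
    at a fixed offset (rev * 24) and lookup is O(1). The stored node
    prefix is compared against the expected changeset node to detect
    stale entries.
    """
    offset = rev * _RECORD.size
    entry = cache_data[offset:offset + _RECORD.size]
    if len(entry) != _RECORD.size:
        return None  # cache does not cover this revision yet
    prefix, filenode = _RECORD.unpack(entry)
    if prefix != changeset_node[:4]:
        return None  # entry belongs to a different changeset
    return filenode
```

The fixed record size is what lets the file grow linearly with the repository and be truncated cleanly when the repository is stripped.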
This patch partially addresses issue4550. Future patches will split the
"tags" cache file into per-filter files and will refactor the cache
format to not capture the .hgtags fnodes, as these are now stored in
the hgtagsfnodes1 cache. This patch is capable of standing alone. We
should not have to wait on the tags cache filter split and format
refactor for this patch to land.
author | Gregory Szorc <gregory.szorc@gmail.com>
---|---
date | Wed, 15 Apr 2015 17:42:38 -0400
parents | fdfc9faca273
children | 3182965b3971
```python
# config.py - configuration parsing for Mercurial
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import error, util
import os, errno

class config(object):
    def __init__(self, data=None):
        self._data = {}
        self._source = {}
        self._unset = []
        if data:
            for k in data._data:
                self._data[k] = data[k].copy()
            self._source = data._source.copy()

    def copy(self):
        return config(self)

    def __contains__(self, section):
        return section in self._data

    def __getitem__(self, section):
        return self._data.get(section, {})

    def __iter__(self):
        for d in self.sections():
            yield d

    def update(self, src):
        for s, n in src._unset:
            if s in self and n in self._data[s]:
                del self._data[s][n]
                del self._source[(s, n)]
        for s in src:
            if s not in self:
                self._data[s] = util.sortdict()
            self._data[s].update(src._data[s])
        self._source.update(src._source)

    def get(self, section, item, default=None):
        return self._data.get(section, {}).get(item, default)

    def backup(self, section, item):
        """return a tuple allowing restore to reinstall a previous value

        The main reason we need it is because it handles the "no data" case.
        """
        try:
            value = self._data[section][item]
            source = self.source(section, item)
            return (section, item, value, source)
        except KeyError:
            return (section, item)

    def source(self, section, item):
        return self._source.get((section, item), "")

    def sections(self):
        return sorted(self._data.keys())

    def items(self, section):
        return self._data.get(section, {}).items()

    def set(self, section, item, value, source=""):
        if section not in self:
            self._data[section] = util.sortdict()
        self._data[section][item] = value
        if source:
            self._source[(section, item)] = source

    def restore(self, data):
        """restore data returned by self.backup"""
        if len(data) == 4:
            # restore old data
            section, item, value, source = data
            self._data[section][item] = value
            self._source[(section, item)] = source
        else:
            # no data before, remove everything
            section, item = data
            if section in self._data:
                self._data[section].pop(item, None)
            self._source.pop((section, item), None)

    def parse(self, src, data, sections=None, remap=None, include=None):
        sectionre = util.re.compile(r'\[([^\[]+)\]')
        itemre = util.re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
        contre = util.re.compile(r'\s+(\S|\S.*\S)\s*$')
        emptyre = util.re.compile(r'(;|#|\s*$)')
        commentre = util.re.compile(r'(;|#)')
        unsetre = util.re.compile(r'%unset\s+(\S+)')
        includere = util.re.compile(r'%include\s+(\S|\S.*\S)\s*$')
        section = ""
        item = None
        line = 0
        cont = False

        for l in data.splitlines(True):
            line += 1
            if line == 1 and l.startswith('\xef\xbb\xbf'):
                # Someone set us up the BOM
                l = l[3:]
            if cont:
                if commentre.match(l):
                    continue
                m = contre.match(l)
                if m:
                    if sections and section not in sections:
                        continue
                    v = self.get(section, item) + "\n" + m.group(1)
                    self.set(section, item, v, "%s:%d" % (src, line))
                    continue
                item = None
                cont = False
            m = includere.match(l)
            if m:
                inc = util.expandpath(m.group(1))
                base = os.path.dirname(src)
                inc = os.path.normpath(os.path.join(base, inc))
                if include:
                    try:
                        include(inc, remap=remap, sections=sections)
                    except IOError, inst:
                        if inst.errno != errno.ENOENT:
                            raise error.ParseError(_("cannot include %s (%s)")
                                                   % (inc, inst.strerror),
                                                   "%s:%s" % (src, line))
                continue
            if emptyre.match(l):
                continue
            m = sectionre.match(l)
            if m:
                section = m.group(1)
                if remap:
                    section = remap.get(section, section)
                if section not in self:
                    self._data[section] = util.sortdict()
                continue
            m = itemre.match(l)
            if m:
                item = m.group(1)
                cont = True
                if sections and section not in sections:
                    continue
                self.set(section, item, m.group(2), "%s:%d" % (src, line))
                continue
            m = unsetre.match(l)
            if m:
                name = m.group(1)
                if sections and section not in sections:
                    continue
                if self.get(section, name) is not None:
                    del self._data[section][name]
                self._unset.append((section, name))
                continue

            raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))

    def read(self, path, fp=None, sections=None, remap=None):
        if not fp:
            fp = util.posixfile(path)
        self.parse(path, fp.read(), sections, remap, self.read)
```
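As a usage sketch of the parser above: the following assumes a Python 2 interpreter with a Mercurial checkout of this era on `sys.path` (the module itself uses Python 2 syntax such as `except IOError, inst`), so that `mercurial.config` is importable; the file name `example.rc` and its contents are made up for illustration.

```python
# Usage sketch, not part of config.py. Assumes Python 2 and a Mercurial
# 3.x source tree on sys.path so ``mercurial.config`` is importable.
from mercurial import config as configmod

hgrc = (
    "[ui]\n"
    "username = Jane Doe <jane@example.com>\n"
    "verbose = True\n"
    "%unset verbose\n"          # %unset removes an item from the current section
    "\n"
    "[alias]\n"
    "latest = log\n"
    "  --limit 5\n"             # indented lines continue the previous value
)

cfg = configmod.config()
cfg.parse('example.rc', hgrc)

print cfg.get('ui', 'username')         # Jane Doe <jane@example.com>
print cfg.get('ui', 'verbose')          # None (removed by %unset)
print repr(cfg.get('alias', 'latest'))  # 'log\n--limit 5'
print cfg.source('ui', 'username')      # example.rc:2

# backup()/restore() capture and reinstall a value, covering the
# "no previous data" case mentioned in backup()'s docstring.
saved = cfg.backup('ui', 'username')
cfg.set('ui', 'username', 'someone else', 'override')
cfg.restore(saved)
print cfg.get('ui', 'username')         # Jane Doe <jane@example.com>
```

Note how every stored value carries a `file:line` source annotation, which is what lets Mercurial report where a configuration setting came from.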