view mercurial/parser.py @ 16521:592701c8eac6 stable
revset: fix adds/modifies/removes and patterns (issue3403)
The fast path was triggered when the argument did not look like "type:value",
with type a known pattern type. This is wrong for two reasons:
- path:value is valid for the fast path
- '*' is interpreted as a glob by default and is not valid for the fast path
Fast path detection is now done after the pattern is parsed, and the normalized
path is extracted for direct comparison. All this seems a bit complicated; it
is tempting to drop the fast path completely. Also, the hasfile() revset does
something similar (it only checks .files()), without a fast path. If the fast
path is really that efficient, maybe it should be used there too.
Note that:
$ log 'modifies("set:modified()")'
is different from:
$ log 'modifies("*")'
because of the usual merge ctx.files()/status(ctx.p1(), ctx) differences.
Reported by Steffen Eichenberg <steffen.eichenberg@msg-gillardon.de>
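
As an illustration of the approach the message describes, here is a rough
sketch of the idea only, not the code committed in this changeset: the pattern
is run through the match machinery first, and the fast path is taken only when
it normalizes to a single literal file. matchmod.match and the anypats()/files()
methods are Mercurial's match API as assumed here; fastpath_file is a
hypothetical helper name.

    # Sketch only -- not the change committed in this revision.
    # Assumes mercurial.match.match(root, cwd, patterns), m.anypats() and
    # m.files(), where anypats() reports whether glob/regexp patterns remain
    # and files() lists the literal file names.
    from mercurial import match as matchmod

    def fastpath_file(repo, pat):
        """Return the normalized file name usable for a direct comparison
        against ctx.files(), or None if the full matcher is required."""
        m = matchmod.match(repo.root, repo.getcwd(), [pat])
        # 'path:foo' and a bare literal path normalize to exactly one file
        # with no wildcard patterns; '*' does not, because the default
        # pattern kind is a glob.
        if not m.anypats() and len(m.files()) == 1:
            return m.files()[0]
        return None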
author    Patrick Mezard <patrick@mezard.eu>
date      Thu, 26 Apr 2012 14:24:46 +0200
parents   4b93bd041772
children  8ac8db8dc346
# parser.py - simple top-down operator precedence parser for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# see http://effbot.org/zone/simple-top-down-parsing.htm and
# http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
# for background

# takes a tokenizer and elements
# tokenizer is an iterator that returns type, value pairs
# elements is a mapping of types to binding strength, prefix and infix actions
# an action is a tree node name, a tree label, and an optional match
# __call__(program) parses program into a labelled tree

import error
from i18n import _

class parser(object):
    def __init__(self, tokenizer, elements, methods=None):
        self._tokenizer = tokenizer
        self._elements = elements
        self._methods = methods
        self.current = None
    def _advance(self):
        'advance the tokenizer'
        t = self.current
        try:
            self.current = self._iter.next()
        except StopIteration:
            pass
        return t
    def _match(self, m, pos):
        'make sure the tokenizer matches an end condition'
        if self.current[0] != m:
            raise error.ParseError(_("unexpected token: %s") % self.current[0],
                                   self.current[2])
        self._advance()
    def _parse(self, bind=0):
        token, value, pos = self._advance()
        # handle prefix rules on current token
        prefix = self._elements[token][1]
        if not prefix:
            raise error.ParseError(_("not a prefix: %s") % token, pos)
        if len(prefix) == 1:
            expr = (prefix[0], value)
        else:
            if len(prefix) > 2 and prefix[2] == self.current[0]:
                self._match(prefix[2], pos)
                expr = (prefix[0], None)
            else:
                expr = (prefix[0], self._parse(prefix[1]))
                if len(prefix) > 2:
                    self._match(prefix[2], pos)
        # gather tokens until we meet a lower binding strength
        while bind < self._elements[self.current[0]][0]:
            token, value, pos = self._advance()
            e = self._elements[token]
            # check for suffix - next token isn't a valid prefix
            if len(e) == 4 and not self._elements[self.current[0]][1]:
                suffix = e[3]
                expr = (suffix[0], expr)
            else:
                # handle infix rules
                if len(e) < 3 or not e[2]:
                    raise error.ParseError(_("not an infix: %s") % token, pos)
                infix = e[2]
                if len(infix) == 3 and infix[2] == self.current[0]:
                    self._match(infix[2], pos)
                    expr = (infix[0], expr, (None))
                else:
                    expr = (infix[0], expr, self._parse(infix[1]))
                    if len(infix) == 3:
                        self._match(infix[2], pos)
        return expr
    def parse(self, message):
        'generate a parse tree from a message'
        self._iter = self._tokenizer(message)
        self._advance()
        res = self._parse()
        token, value, pos = self.current
        return res, pos
    def eval(self, tree):
        'recursively evaluate a parse tree using node methods'
        if not isinstance(tree, tuple):
            return tree
        return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
    def __call__(self, message):
        'parse a message into a parse tree and evaluate if methods given'
        t = self.parse(message)
        if self._methods:
            return self.eval(t)
        return t
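
The tokenizer/elements contract described in the header comments can be made
concrete with a minimal, hypothetical usage sketch (not part of Mercurial): a
toy arithmetic grammar driven by the parser class above. The tokenizer yields
(type, value, pos) 3-tuples and terminates with an 'end' token; elements maps
each token type to a (binding strength, prefix action, infix action) tuple, and
methods maps tree node names to evaluation functions. All grammar names below
are made up for illustration.

    # Usage sketch (hypothetical): a toy arithmetic grammar for the parser
    # class defined above.
    import re

    def tokenize(program):
        # yield (type, value, pos) 3-tuples; characters outside the regexp
        # are silently skipped in this sketch, and the stream must finish
        # with an 'end' token so _parse always has a binding to look at
        for m in re.finditer(r'\d+|[+*()]', program):
            tok = m.group()
            if tok.isdigit():
                yield ('number', int(tok), m.start())
            else:
                yield (tok, None, m.start())
        yield ('end', None, len(program))

    # token type -> (binding strength, prefix action, infix action)
    elements = {
        '(': (20, ('group', 1, ')'), None),
        ')': (0, None, None),
        '+': (10, None, ('add', 10)),
        '*': (15, None, ('mul', 15)),
        'number': (0, ('number',), None),
        'end': (0, None, None),
    }

    # tree node name -> evaluation method
    methods = {
        'number': lambda v: v,
        'group': lambda e: e,
        'add': lambda a, b: a + b,
        'mul': lambda a, b: a * b,
    }

    p = parser(tokenize, elements, methods)
    tree, pos = p.parse('2 + 3 * (4 + 1)')
    print tree          # ('add', ('number', 2), ('mul', ('number', 3), ...))
    print p.eval(tree)  # 17

Note that parse() returns a (tree, pos) pair, so the sketch calls eval() on the
tree element directly instead of going through __call__.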