parser: fill invalid infix and suffix actions with None
This can simplify the expansion of (prefix, infix, suffix) actions.
--- a/mercurial/fileset.py Sun Jul 05 11:06:58 2015 +0900
+++ b/mercurial/fileset.py Sun Jul 05 11:17:22 2015 +0900
@@ -11,20 +11,20 @@
elements = {
# token-type: binding-strength, prefix, infix, suffix
- "(": (20, ("group", 1, ")"), ("func", 1, ")")),
- "-": (5, ("negate", 19), ("minus", 5)),
- "not": (10, ("not", 10)),
- "!": (10, ("not", 10)),
- "and": (5, None, ("and", 5)),
- "&": (5, None, ("and", 5)),
- "or": (4, None, ("or", 4)),
- "|": (4, None, ("or", 4)),
- "+": (4, None, ("or", 4)),
- ",": (2, None, ("list", 2)),
- ")": (0, None, None),
- "symbol": (0, ("symbol",), None),
- "string": (0, ("string",), None),
- "end": (0, None, None),
+ "(": (20, ("group", 1, ")"), ("func", 1, ")"), None),
+ "-": (5, ("negate", 19), ("minus", 5), None),
+ "not": (10, ("not", 10), None, None),
+ "!": (10, ("not", 10), None, None),
+ "and": (5, None, ("and", 5), None),
+ "&": (5, None, ("and", 5), None),
+ "or": (4, None, ("or", 4), None),
+ "|": (4, None, ("or", 4), None),
+ "+": (4, None, ("or", 4), None),
+ ",": (2, None, ("list", 2), None),
+ ")": (0, None, None, None),
+ "symbol": (0, ("symbol",), None, None),
+ "string": (0, ("string",), None, None),
+ "end": (0, None, None, None),
}
keywords = set(['and', 'or', 'not'])
--- a/mercurial/parser.py Sun Jul 05 11:06:58 2015 +0900
+++ b/mercurial/parser.py Sun Jul 05 11:17:22 2015 +0900
@@ -12,7 +12,7 @@
# takes a tokenizer and elements
# tokenizer is an iterator that returns (type, value, pos) tuples
# elements is a mapping of types to binding strength, prefix, infix and
-# optional suffix actions
+# suffix actions
# an action is a tree node name, a tree label, and an optional match
# __call__(program) parses program into a labeled tree
@@ -54,16 +54,14 @@
# gather tokens until we meet a lower binding strength
while bind < self._elements[self.current[0]][0]:
token, value, pos = self._advance()
- e = self._elements[token]
+ infix, suffix = self._elements[token][2:]
# check for suffix - next token isn't a valid prefix
- if len(e) == 4 and not self._elements[self.current[0]][1]:
- suffix = e[3]
+ if suffix and not self._elements[self.current[0]][1]:
expr = (suffix[0], expr)
else:
# handle infix rules
- if len(e) < 3 or not e[2]:
+ if not infix:
raise error.ParseError(_("not an infix: %s") % token, pos)
- infix = e[2]
if len(infix) == 3 and infix[2] == self.current[0]:
self._match(infix[2], pos)
expr = (infix[0], expr, (None))
--- a/mercurial/revset.py Sun Jul 05 11:06:58 2015 +0900
+++ b/mercurial/revset.py Sun Jul 05 11:17:22 2015 +0900
@@ -116,30 +116,30 @@
elements = {
# token-type: binding-strength, prefix, infix, suffix
- "(": (21, ("group", 1, ")"), ("func", 1, ")")),
- "##": (20, None, ("_concat", 20)),
- "~": (18, None, ("ancestor", 18)),
+ "(": (21, ("group", 1, ")"), ("func", 1, ")"), None),
+ "##": (20, None, ("_concat", 20), None),
+ "~": (18, None, ("ancestor", 18), None),
"^": (18, None, ("parent", 18), ("parentpost", 18)),
- "-": (5, ("negate", 19), ("minus", 5)),
+ "-": (5, ("negate", 19), ("minus", 5), None),
"::": (17, ("dagrangepre", 17), ("dagrange", 17),
("dagrangepost", 17)),
"..": (17, ("dagrangepre", 17), ("dagrange", 17),
("dagrangepost", 17)),
":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
- "not": (10, ("not", 10)),
- "!": (10, ("not", 10)),
- "and": (5, None, ("and", 5)),
- "&": (5, None, ("and", 5)),
+ "not": (10, ("not", 10), None, None),
+ "!": (10, ("not", 10), None, None),
+ "and": (5, None, ("and", 5), None),
+ "&": (5, None, ("and", 5), None),
"%": (5, None, ("only", 5), ("onlypost", 5)),
- "or": (4, None, ("or", 4)),
- "|": (4, None, ("or", 4)),
- "+": (4, None, ("or", 4)),
- "=": (3, None, ("keyvalue", 3)),
- ",": (2, None, ("list", 2)),
- ")": (0, None, None),
- "symbol": (0, ("symbol",), None),
- "string": (0, ("string",), None),
- "end": (0, None, None),
+ "or": (4, None, ("or", 4), None),
+ "|": (4, None, ("or", 4), None),
+ "+": (4, None, ("or", 4), None),
+ "=": (3, None, ("keyvalue", 3), None),
+ ",": (2, None, ("list", 2), None),
+ ")": (0, None, None, None),
+ "symbol": (0, ("symbol",), None, None),
+ "string": (0, ("string",), None, None),
+ "end": (0, None, None, None),
}
keywords = set(['and', 'or', 'not'])
--- a/mercurial/templater.py Sun Jul 05 11:06:58 2015 +0900
+++ b/mercurial/templater.py Sun Jul 05 11:17:22 2015 +0900
@@ -16,16 +16,16 @@
elements = {
# token-type: binding-strength, prefix, infix, suffix
- "(": (20, ("group", 1, ")"), ("func", 1, ")")),
- ",": (2, None, ("list", 2)),
- "|": (5, None, ("|", 5)),
- "%": (6, None, ("%", 6)),
- ")": (0, None, None),
- "integer": (0, ("integer",), None),
- "symbol": (0, ("symbol",), None),
- "string": (0, ("string",), None),
- "template": (0, ("template",), None),
- "end": (0, None, None),
+ "(": (20, ("group", 1, ")"), ("func", 1, ")"), None),
+ ",": (2, None, ("list", 2), None),
+ "|": (5, None, ("|", 5), None),
+ "%": (6, None, ("%", 6), None),
+ ")": (0, None, None, None),
+ "integer": (0, ("integer",), None, None),
+ "symbol": (0, ("symbol",), None, None),
+ "string": (0, ("string",), None, None),
+ "template": (0, ("template",), None, None),
+ "end": (0, None, None, None),
}
def tokenize(program, start, end):