changeset 5009:30570c2f576f

Merge with mpm
author Brendan Cully <brendan@kublai.com>
date Thu, 26 Jul 2007 10:43:01 -0700
parents b6c3abdbe0eb (current diff) 3addf4531643 (diff)
children 6b2d8caf87b2 89fbb0a5e8e3
files        mercurial/dirstate.py mercurial/merge.py mercurial/revlog.py
diffstat 3 files changed, 134 insertions(+), 97 deletions(-)
--- a/mercurial/dirstate.py	Thu Jul 26 10:42:44 2007 -0700
+++ b/mercurial/dirstate.py	Thu Jul 26 10:43:01 2007 -0700
@@ -306,16 +306,16 @@
                 bs += 1
         return ret
 
-    def _supported(self, f, st, verbose=False):
-        if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
+    def _supported(self, f, mode, verbose=False):
+        if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
             return True
         if verbose:
             kind = 'unknown'
-            if stat.S_ISCHR(st.st_mode): kind = _('character device')
-            elif stat.S_ISBLK(st.st_mode): kind = _('block device')
-            elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
-            elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
-            elif stat.S_ISDIR(st.st_mode): kind = _('directory')
+            if stat.S_ISCHR(mode): kind = _('character device')
+            elif stat.S_ISBLK(mode): kind = _('block device')
+            elif stat.S_ISFIFO(mode): kind = _('fifo')
+            elif stat.S_ISSOCK(mode): kind = _('socket')
+            elif stat.S_ISDIR(mode): kind = _('directory')
             self._ui.warn(_('%s: unsupported file type (type is %s)\n')
                           % (self.pathto(f), kind))
         return False
@@ -363,59 +363,73 @@
         common_prefix_len = len(self._root)
         if not self._root.endswith(os.sep):
             common_prefix_len += 1
+
+        normpath = util.normpath
+        listdir = os.listdir
+        lstat = os.lstat
+        bisect_left = bisect.bisect_left
+        isdir = os.path.isdir
+        pconvert = util.pconvert
+        join = os.path.join
+        s_isdir = stat.S_ISDIR
+        supported = self._supported
+        _join = self._join
+        known = {'.hg': 1}
+
         # recursion free walker, faster than os.walk.
         def findfiles(s):
             work = [s]
+            wadd = work.append
+            found = []
+            add = found.append
             if directories:
-                yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
+                add((normpath(s[common_prefix_len:]), 'd', lstat(s)))
             while work:
                 top = work.pop()
-                names = os.listdir(top)
+                names = listdir(top)
                 names.sort()
                 # nd is the top of the repository dir tree
-                nd = util.normpath(top[common_prefix_len:])
+                nd = normpath(top[common_prefix_len:])
                 if nd == '.':
                     nd = ''
                 else:
                     # do not recurse into a repo contained in this
                     # one. use bisect to find .hg directory so speed
                     # is good on big directory.
-                    hg = bisect.bisect_left(names, '.hg')
+                    hg = bisect_left(names, '.hg')
                     if hg < len(names) and names[hg] == '.hg':
-                        if os.path.isdir(os.path.join(top, '.hg')):
+                        if isdir(join(top, '.hg')):
                             continue
                 for f in names:
-                    np = util.pconvert(os.path.join(nd, f))
-                    if seen(np):
+                    np = pconvert(join(nd, f))
+                    if np in known:
                         continue
-                    p = os.path.join(top, f)
+                    known[np] = 1
+                    p = join(top, f)
                     # don't trip over symlinks
-                    st = os.lstat(p)
-                    if stat.S_ISDIR(st.st_mode):
+                    st = lstat(p)
+                    if s_isdir(st.st_mode):
                         if not ignore(np):
-                            work.append(p)
+                            wadd(p)
                             if directories:
-                                yield 'd', np, st
-                        if imatch(np) and np in dc:
-                            yield 'm', np, st
+                                add((np, 'd', st))
+                        if np in dc and match(np):
+                            add((np, 'm', st))
                     elif imatch(np):
-                        if self._supported(np, st):
-                            yield 'f', np, st
+                        if supported(np, st.st_mode):
+                            add((np, 'f', st))
                         elif np in dc:
-                            yield 'm', np, st
-
-        known = {'.hg': 1}
-        def seen(fn):
-            if fn in known: return True
-            known[fn] = 1
+                            add((np, 'm', st))
+            found.sort()
+            return found
 
         # step one, find all files that match our criteria
         files.sort()
         for ff in files:
-            nf = util.normpath(ff)
-            f = self._join(ff)
+            nf = normpath(ff)
+            f = _join(ff)
             try:
-                st = os.lstat(f)
+                st = lstat(f)
             except OSError, inst:
                 found = False
                 for fn in dc:
@@ -429,15 +443,15 @@
                     elif badmatch and badmatch(ff) and imatch(nf):
                         yield 'b', ff, None
                 continue
-            if stat.S_ISDIR(st.st_mode):
-                cmp1 = (lambda x, y: cmp(x[1], y[1]))
-                sorted_ = [ x for x in findfiles(f) ]
-                sorted_.sort(cmp1)
-                for e in sorted_:
-                    yield e
+            if s_isdir(st.st_mode):
+                for f, src, st in findfiles(f):
+                    yield src, f, st
             else:
-                if not seen(nf) and match(nf):
-                    if self._supported(ff, st, verbose=True):
+                if nf in known:
+                    continue
+                known[nf] = 1
+                if match(nf):
+                    if supported(ff, st.st_mode, verbose=True):
                         yield 'f', nf, st
                     elif ff in dc:
                         yield 'm', nf, st
@@ -447,57 +461,73 @@
         ks = dc.keys()
         ks.sort()
         for k in ks:
-            if not seen(k) and imatch(k):
+            if k in known:
+                continue
+            known[k] = 1
+            if imatch(k):
                 yield 'm', k, None
 
     def status(self, files, match, list_ignored, list_clean):
         lookup, modified, added, unknown, ignored = [], [], [], [], []
         removed, deleted, clean = [], [], []
 
+        _join = self._join
+        lstat = os.lstat
+        cmap = self._copymap
+        dmap = self._map
+        ladd = lookup.append
+        madd = modified.append
+        aadd = added.append
+        uadd = unknown.append
+        iadd = ignored.append
+        radd = removed.append
+        dadd = deleted.append
+        cadd = clean.append
+
         for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
-            try:
-                type_, mode, size, time = self._map[fn]
-            except KeyError:
+            if fn in dmap:
+                type_, mode, size, time = dmap[fn]
+            else:
                 if list_ignored and self._ignore(fn):
-                    ignored.append(fn)
+                    iadd(fn)
                 else:
-                    unknown.append(fn)
+                    uadd(fn)
                 continue
             if src == 'm':
                 nonexistent = True
                 if not st:
                     try:
-                        st = os.lstat(self._join(fn))
+                        st = lstat(_join(fn))
                     except OSError, inst:
                         if inst.errno != errno.ENOENT:
                             raise
                         st = None
                     # We need to re-check that it is a valid file
-                    if st and self._supported(fn, st):
+                    if st and self._supported(fn, st.st_mode):
                         nonexistent = False
                 # XXX: what to do with file no longer present in the fs
                 # who are not removed in the dirstate ?
                 if nonexistent and type_ in "nm":
-                    deleted.append(fn)
+                    dadd(fn)
                     continue
             # check the common case first
             if type_ == 'n':
                 if not st:
-                    st = os.lstat(self._join(fn))
+                    st = lstat(_join(fn))
                 if (size >= 0 and (size != st.st_size
                                    or (mode ^ st.st_mode) & 0100)
                     or fn in self._copymap):
-                    modified.append(fn)
+                    madd(fn)
                 elif time != int(st.st_mtime):
-                    lookup.append(fn)
+                    ladd(fn)
                 elif list_clean:
-                    clean.append(fn)
+                    cadd(fn)
             elif type_ == 'm':
-                modified.append(fn)
+                madd(fn)
             elif type_ == 'a':
-                added.append(fn)
+                aadd(fn)
             elif type_ == 'r':
-                removed.append(fn)
+                radd(fn)
 
         return (lookup, modified, added, removed, deleted, unknown, ignored,
                 clean)
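
The dirstate.py hunks are essentially a micro-optimization pass over the walker and status(): module globals and bound methods (os.lstat, os.listdir, util.pconvert, list.append and friends) are bound to local names before the hot loops, findfiles() now builds and returns a single sorted list instead of yielding entries that the caller re-sorts through a lambda comparator, and the seen() helper is replaced by direct membership tests on the known dict. The payoff comes from CPython name resolution: a local is fetched in one step, while a global plus one or two attribute lookups is re-resolved on every loop iteration. A minimal, self-contained sketch of the pattern (slow_walk and fast_walk are hypothetical names for illustration, not Mercurial code):

    # Illustrative only -- not Mercurial code.  Hoisting lookups into
    # locals before a tight loop, as the dirstate hunks above do.
    import os
    import timeit

    def slow_walk(paths):
        out = []
        for p in paths:
            # global lookup (os) plus two attribute lookups per iteration
            out.append(os.path.normpath(p))
        return out

    def fast_walk(paths):
        out = []
        add = out.append                 # bind the bound method once
        normpath = os.path.normpath      # bind the function once
        for p in paths:
            add(normpath(p))
        return out

    if __name__ == '__main__':
        paths = ['a//b/./c'] * 10000
        for fn in (slow_walk, fast_walk):
            print('%s: %.3fs' % (fn.__name__,
                                 timeit.timeit(lambda: fn(paths), number=100)))

The same idea drives the ladd/madd/uadd aliases in status() above: each result list's append method is looked up once per status() call rather than once per file.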
--- a/mercurial/merge.py	Thu Jul 26 10:42:44 2007 -0700
+++ b/mercurial/merge.py	Thu Jul 26 10:43:01 2007 -0700
@@ -452,7 +452,7 @@
                 repo.dirstate.forget(f)
         elif m == "f": # forget
             repo.dirstate.forget(f)
-        elif m == "g": # get
+        elif m in "ge": # get or exec change
             if branchmerge:
                 repo.dirstate.normaldirty(f)
             else:
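
The single merge.py change widens the "get" branch so that the "e" action (an exec-bit-only change, per the updated comment) is recorded in the dirstate the same way as a full "get". One of several single-character action codes is matched with a substring membership test instead of a chain of equality checks; a tiny hypothetical dispatcher showing the same idiom (record() is illustrative, not Mercurial's recordupdates):

    # Illustrative only -- not Mercurial's recordupdates().  Matching one
    # of several single-character action codes with a membership test.
    def record(action, filename):
        # assumes action is one non-empty character, as in the merge
        # action list; note that '' or 'ge' would also satisfy 'in "ge"'
        if action == "f":
            return "forget %s" % filename
        elif action in "ge":             # 'g' (get) or 'e' (exec change)
            return "mark %s normal" % filename
        return "leave %s alone" % filename

    assert record("g", "a.txt") == "mark a.txt normal"
    assert record("e", "a.txt") == "mark a.txt normal"
    assert record("f", "a.txt") == "forget a.txt"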
--- a/mercurial/revlog.py	Thu Jul 26 10:42:44 2007 -0700
+++ b/mercurial/revlog.py	Thu Jul 26 10:43:01 2007 -0700
@@ -15,6 +15,12 @@
 import binascii, changegroup, errno, ancestor, mdiff, os
 import sha, struct, util, zlib
 
+_pack = struct.pack
+_unpack = struct.unpack
+_compress = zlib.compress
+_decompress = zlib.decompress
+_sha = sha.new
+
 # revlog flags
 REVLOGV0 = 0
 REVLOGNG = 1
@@ -29,8 +35,6 @@
     pass
 
 def getoffset(q):
-    if q & 0xFFFF:
-        raise RevlogError(_('incompatible revision flag %x') % q)
     return int(q >> 16)
 
 def gettype(q):
@@ -48,7 +52,7 @@
     """
     l = [p1, p2]
     l.sort()
-    s = sha.new(l[0])
+    s = _sha(l[0])
     s.update(l[1])
     s.update(text)
     return s.digest()
@@ -61,7 +65,7 @@
         if text[0] == '\0':
             return ("", text)
         return ('u', text)
-    bin = zlib.compress(text)
+    bin = _compress(text)
     if len(bin) > len(text):
         if text[0] == '\0':
             return ("", text)
@@ -76,7 +80,7 @@
     if t == '\0':
         return bin
     if t == 'x':
-        return zlib.decompress(bin)
+        return _decompress(bin)
     if t == 'u':
         return bin[1:]
     raise RevlogError(_("unknown compression type %r") % t)
@@ -236,16 +240,15 @@
         self.p.loadindex(pos)
         return self.p.index[pos]
     def __getitem__(self, pos):
-        return struct.unpack(indexformatng,
-                             self.p.index[pos] or self.load(pos))
+        return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
     def __setitem__(self, pos, item):
-        self.p.index[pos] = struct.pack(indexformatng, *item)
+        self.p.index[pos] = _pack(indexformatng, *item)
     def __delitem__(self, pos):
         del self.p.index[pos]
     def insert(self, pos, e):
-        self.p.index.insert(pos, struct.pack(indexformatng, *e))
+        self.p.index.insert(pos, _pack(indexformatng, *e))
     def append(self, e):
-        self.p.index.append(struct.pack(indexformatng, *e))
+        self.p.index.append(_pack(indexformatng, *e))
 
 class lazymap(object):
     """a lazy version of the node map"""
@@ -268,7 +271,7 @@
                 self.p.loadindex(i)
                 ret = self.p.index[i]
             if isinstance(ret, str):
-                ret = struct.unpack(indexformatng, ret)
+                ret = _unpack(indexformatng, ret)
             yield ret[7]
     def __getitem__(self, key):
         try:
@@ -301,7 +304,7 @@
         while off + s <= l:
             cur = data[off:off + s]
             off += s
-            e = struct.unpack(indexformatv0, cur)
+            e = _unpack(indexformatv0, cur)
             # transform to revlogv1 format
             e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                   nodemap[e[4]], nodemap[e[5]], e[6])
@@ -314,7 +317,7 @@
     def packentry(self, entry, node, version):
         e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
               node(entry[5]), node(entry[6]), entry[7])
-        return struct.pack(indexformatv0, *e2)
+        return _pack(indexformatv0, *e2)
 
 # index ng:
 # 6 bytes offset
@@ -359,12 +362,11 @@
         # if we're not using lazymap, always read the whole index
         data = fp.read()
         l = len(data) - s
-        unpack = struct.unpack
         append = index.append
         if inline:
             cache = (0, data)
             while off <= l:
-                e = unpack(indexformatng, data[off:off + s])
+                e = _unpack(indexformatng, data[off:off + s])
                 nodemap[e[7]] = n
                 append(e)
                 n += 1
@@ -373,7 +375,7 @@
                 off += e[1] + s
         else:
             while off <= l:
-                e = unpack(indexformatng, data[off:off + s])
+                e = _unpack(indexformatng, data[off:off + s])
                 nodemap[e[7]] = n
                 append(e)
                 n += 1
@@ -387,9 +389,9 @@
         return index, nodemap, cache
 
     def packentry(self, entry, node, version):
-        p = struct.pack(indexformatng, *entry)
+        p = _pack(indexformatng, *entry)
         if not entry[3] and not getoffset(entry[0]) and entry[5] == nullrev:
-            p = struct.pack(versionformat, version) + p[4:]
+            p = _pack(versionformat, version) + p[4:]
         return p
 
 class revlog(object):
@@ -511,7 +513,7 @@
     def parentrevs(self, rev):
         return self.index[rev][5:7]
     def start(self, rev):
-        return getoffset(self.index[rev][0])
+        return int(self.index[rev][0] >> 16)
     def end(self, rev):
         return self.start(rev) + self.length(rev)
     def length(self, rev):
@@ -847,12 +849,7 @@
         return hash(text, p1, p2) != node
 
     def chunk(self, rev, df=None):
-        start, length = self.start(rev), self.length(rev)
-        if self._inline:
-            start += (rev + 1) * self._io.size
-        end = start + length
         def loadcache(df):
-            cache_length = max(65536, length)
             if not df:
                 if self._inline:
                     df = self.opener(self.indexfile)
@@ -861,21 +858,29 @@
             df.seek(start)
             self._chunkcache = (start, df.read(cache_length))
 
-        if not self._chunkcache:
-            loadcache(df)
+        start, length = self.start(rev), self.length(rev)
+        if self._inline:
+            start += (rev + 1) * self._io.size
+        end = start + length
 
-        cache_start = self._chunkcache[0]
-        cache_end = cache_start + len(self._chunkcache[1])
-        if start >= cache_start and end <= cache_end:
-            # it is cached
-            offset = start - cache_start
+        offset = 0
+        if not self._chunkcache:
+            cache_length = max(65536, length)
+            loadcache(df)
         else:
-            loadcache(df)
-            offset = 0
+            cache_start = self._chunkcache[0]
+            cache_length = len(self._chunkcache[1])
+            cache_end = cache_start + cache_length
+            if start >= cache_start and end <= cache_end:
+                # it is cached
+                offset = start - cache_start
+            else:
+                cache_length = max(65536, length)
+                loadcache(df)
 
         # avoid copying large chunks
         c = self._chunkcache[1]
-        if len(c) > length:
+        if cache_length != length:
             c = c[offset:offset + length]
 
         return decompress(c)
@@ -887,13 +892,11 @@
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
-        b1 = self.base(rev1)
-        b2 = self.base(rev2)
-        if b1 == b2 and rev1 + 1 == rev2:
+        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
             return self.chunk(rev2)
-        else:
-            return mdiff.textdiff(self.revision(self.node(rev1)),
-                                  self.revision(self.node(rev2)))
+
+        return mdiff.textdiff(self.revision(self.node(rev1)),
+                              self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given"""
@@ -907,6 +910,10 @@
         rev = self.rev(node)
         base = self.base(rev)
 
+        # check rev flags
+        if self.index[rev][0] & 0xFFFF:
+            raise RevlogError(_('incompatible revision flag %x') % (self.index[rev][0] & 0xFFFF))
+
         if self._inline:
             # we probably have the whole chunk cached
             df = None
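
The revlog.py hunks combine three things: struct.pack/unpack, zlib.compress/decompress and sha.new are bound to module-level underscore aliases so the index packing and parsing loops avoid repeated attribute lookups; the flag check is dropped from getoffset(), which sits on the start() fast path (start() now inlines the shift outright), and is redone once per revision() call instead; and chunk() is rearranged so the cache window length is tracked explicitly rather than recomputed from the cached string. The flag handling only makes sense given how the first index field is packed. A self-contained sketch of that packing, consistent with offset_type(), getoffset() and the 0xFFFF mask in revision() above (standalone functions for illustration, not revlog code):

    # Illustrative sketch of the index-field packing; standalone, not revlog code.
    def offset_type(offset, flags):
        # data-file offset in the high bits, 16-bit type/flags field in the low bits
        return (offset << 16) | flags

    def getoffset(q):
        # fast path: no validation, just the shift (as in start() above)
        return q >> 16

    def gettype(q):
        return q & 0xFFFF

    q = offset_type(123456, 0)
    assert getoffset(q) == 123456 and gettype(q) == 0
    # a non-zero low 16 bits is what revision() now rejects as an
    # "incompatible revision flag"
    assert gettype(offset_type(123456, 0x1)) == 0x1

The restructured chunk() keeps a (start, data) read-ahead window of at least 64K and serves any request that falls entirely inside it by slicing; a miss re-reads a fresh window from the requested offset. A minimal cache along the same lines (ChunkCache is a hypothetical stand-in, not revlog's implementation):

    # ChunkCache is a hypothetical stand-in for revlog's chunk cache.
    import io

    class ChunkCache(object):
        def __init__(self, fileobj, window=65536):
            self.f = fileobj
            self.window = window
            self.cache = None                   # (start, data)

        def read(self, start, length):
            end = start + length
            if self.cache:
                cstart = self.cache[0]
                cend = cstart + len(self.cache[1])
                if start >= cstart and end <= cend:
                    # hit: slice out of the cached window
                    offset = start - cstart
                    return self.cache[1][offset:offset + length]
            # miss: read at least one full window starting at 'start'
            self.f.seek(start)
            self.cache = (start, self.f.read(max(self.window, length)))
            return self.cache[1][:length]

    f = io.BytesIO(b'x' * 200000)
    c = ChunkCache(f)
    assert c.read(10, 100) == b'x' * 100        # miss, fills the cache
    assert c.read(500, 1000) == b'x' * 1000     # hit, served from the cache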