from __future__ import absolute_import
import sys
import unittest

from mercurial import (
    error,
    node,
)

try:
    from mercurial import rustext

    rustext.__name__  # trigger immediate actual import
except ImportError:
    rustext = None
else:
    # this would fail already without appropriate ancestor.__package__
    from mercurial.rustext.ancestor import (
        AncestorsIterator,
        LazyAncestors,
        MissingAncestors,
    )
    from mercurial.rustext import dagop

try:
    from mercurial.cext import parsers as cparsers
except ImportError:
    cparsers = None

# picked from test-parse-index2, copied rather than imported
# so that it stays stable even if test-parse-index2 changes or disappears.
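# The data is a non-inlined revlog index describing four revisions that form
# a simple linear chain (each revision has the previous one as sole parent);
# the assertions in the tests below double-check that assumption before
# relying on it.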
data_non_inlined = (
    b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
    b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
    b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
    b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
    b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
    b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
    b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
    b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
    b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
    b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
    b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
    b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)


@unittest.skipIf(
    rustext is None or cparsers is None,
    "rustext or the C Extension parsers module "
    "ancestor relies on is not available",
)
class rustancestorstest(unittest.TestCase):
    """Test the correctness of binding to Rust code.

    This test is merely for the bindings to Rust themselves: extraction of
    Python variables, giving back the results, etc.

    It is not meant to test the algorithmic correctness of the operations
    on ancestors it provides. Hence the very simple embedded index data is
    good enough.

    Algorithmic correctness is asserted by the Rust unit tests.
    """

    def parseindex(self):
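        # parse_index2() returns an (index, cache) pair; only the index
        # object is needed here.  The second argument tells the parser that
        # the data is not inlined.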
        return cparsers.parse_index2(data_non_inlined, False)[0]

    def testiteratorrevlist(self):
        idx = self.parseindex()
        # checking test assumption about the index binary data:
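        # (items 5 and 6 of each index entry are the parent revisions; the
        # expected mapping below describes a linear chain 0 <- 1 <- 2 <- 3)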
        self.assertEqual(
            {i: (r[5], r[6]) for i, r in enumerate(idx)},
            {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)},
        )
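        # The last argument of AncestorsIterator toggles inclusiveness: with
        # True the initial revisions themselves are yielded, with False only
        # their ancestors are.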
        ait = AncestorsIterator(idx, [3], 0, True)
        self.assertEqual([r for r in ait], [3, 2, 1, 0])

        ait = AncestorsIterator(idx, [3], 0, False)
        self.assertEqual([r for r in ait], [2, 1, 0])

    def testlazyancestors(self):
        idx = self.parseindex()
        start_count = sys.getrefcount(idx)  # should be 2 (see Python doc)
        self.assertEqual(
            {i: (r[5], r[6]) for i, r in enumerate(idx)},
            {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)},
        )
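        # LazyAncestors takes the same (index, revs, stoprev, inclusive)
        # arguments as AncestorsIterator, but it can be iterated several
        # times, spawning a fresh iterator each time.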
        lazy = LazyAncestors(idx, [3], 0, True)
        # we have two more references to the index:
        # - in its inner iterator for __contains__ and __bool__
        # - in the LazyAncestors instance itself (to spawn new iterators)
        self.assertEqual(sys.getrefcount(idx), start_count + 2)

        self.assertTrue(2 in lazy)
        self.assertTrue(bool(lazy))
        self.assertEqual(list(lazy), [3, 2, 1, 0])
        # a second time to validate that we spawn new iterators
        self.assertEqual(list(lazy), [3, 2, 1, 0])

        # now let's watch the refcounts closer
        ait = iter(lazy)
        self.assertEqual(sys.getrefcount(idx), start_count + 3)
        del ait
        self.assertEqual(sys.getrefcount(idx), start_count + 2)
        del lazy
        self.assertEqual(sys.getrefcount(idx), start_count)

        # let's check bool for an empty one
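        # (rev 0 has no parents and inclusive is False, so nothing is yielded)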
        self.assertFalse(LazyAncestors(idx, [0], 0, False))

    def testmissingancestors(self):
        idx = self.parseindex()
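        # MissingAncestors is built with a set of base revisions; it computes
        # which ancestors of the given revisions (including those revisions
        # themselves) are not already ancestors of, or equal to, the bases.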
        missanc = MissingAncestors(idx, [1])
        self.assertTrue(missanc.hasbases())
        self.assertEqual(missanc.missingancestors([3]), [2, 3])
        missanc.addbases({2})
        self.assertEqual(missanc.bases(), {1, 2})
        self.assertEqual(missanc.missingancestors([3]), [3])
        self.assertEqual(missanc.basesheads(), {2})

    def testmissingancestorsremove(self):
        idx = self.parseindex()
        missanc = MissingAncestors(idx, [1])
        revs = {0, 1, 2, 3}
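        # removeancestorsfrom() mutates the set in place, dropping every
        # revision that is an ancestor of (or equal to) one of the bases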
        missanc.removeancestorsfrom(revs)
        self.assertEqual(revs, {2, 3})

    def testrefcount(self):
        idx = self.parseindex()
        start_count = sys.getrefcount(idx)

        # refcount increases upon iterator init...
        ait = AncestorsIterator(idx, [3], 0, True)
        self.assertEqual(sys.getrefcount(idx), start_count + 1)
        self.assertEqual(next(ait), 3)

        # and decreases once the iterator is removed
        del ait
        self.assertEqual(sys.getrefcount(idx), start_count)

        # and removing ref to the index after iterator init is no issue
        ait = AncestorsIterator(idx, [3], 0, True)
        del idx
        self.assertEqual(list(ait), [3, 2, 1, 0])

    def testgrapherror(self):
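        # Each index entry is 64 bytes; byte 27 of the second entry falls in
        # its first-parent field (assuming the usual revlog v1 layout), so
        # overwriting it gives rev 1 a parent far beyond the end of the index.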
        data = (
            data_non_inlined[: 64 + 27] + b'\xf2' + data_non_inlined[64 + 28 :]
        )
        idx = cparsers.parse_index2(data, False)[0]
        with self.assertRaises(rustext.GraphError) as arc:
            AncestorsIterator(idx, [1], -1, False)
        exc = arc.exception
        self.assertIsInstance(exc, ValueError)
        # rust-cpython issues appropriate str instances for Python 2 and 3
        self.assertEqual(exc.args, ('ParentOutOfRange', 1))

    def testwdirunsupported(self):
        # trying to access ancestors of the working directory raises
        # WdirUnsupported directly
        idx = self.parseindex()
        with self.assertRaises(error.WdirUnsupported):
            list(AncestorsIterator(idx, [node.wdirrev], -1, False))

    def testheadrevs(self):
        idx = self.parseindex()
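        # the index is a linear chain, so 3 is the only head among [1, 2, 3]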
        self.assertEqual(dagop.headrevs(idx, [1, 2, 3]), {3})


if __name__ == '__main__':
    import silenttestrunner

    silenttestrunner.main(__name__)