view tests/test-rust-discovery.py @ 42486:35d674a3d5db
copies: don't filter out copy targets created on other side of merge commit
If file X is copied to Y on one side of a merge and the other side
creates Y (with no copy recorded), we would not mark that as a copy. In the
changeset-centric pathcopies() version, that was done by checking if
the copy target existed on the other branch. Even though merge commits
are pretty uncommon, it still turned out to be too expensive to load
the manifests of the parents of merge commits. In a repo of
mozilla-unified converted to storing copies in changesets, about 2m30s
of `hg debugpathcopies FIREFOX_BETA_59_END FIREFOX_BETA_60_BASE` is
spent on this check of merge commits.
I tried to think of a way of storing more information in the
changesets in order to cheaply detect these cases, but I couldn't
think of a solution. So this patch simply removes those checks.
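For context, the removed filtering had roughly this shape. The sketch below is hypothetical (the function name and arguments are illustrative, not Mercurial's actual internals): a copy recorded in the changeset was dropped whenever its destination already existed in the other merge parent's manifest, and loading that manifest is the expensive step described above.

```python
def filter_copies_at_merge(copies, other_parent_manifest):
    """Hypothetical sketch of the kind of check this patch removes.

    ``copies`` maps destination path -> source path as recorded in the
    changeset; ``other_parent_manifest`` stands in for the manifest of
    the other parent of the merge (any set-like container of paths).
    """
    return {
        dst: src
        for dst, src in copies.items()
        # old behavior: if the other side created ``dst`` independently,
        # do not report ``src -> dst`` as a copy
        if dst not in other_parent_manifest
    }
```

Without this filter, copies recorded in the changeset are reported even when the destination also exists on the other parent, which is what produces the extra copies listed below.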
For reference, these extra copies are reported by the aforementioned
command after this patch:
browser/base/content/sanitize.js -> browser/modules/Sanitizer.jsm
testing/mozbase/mozprocess/tests/process_normal_finish_python.ini -> testing/mozbase/mozprocess/tests/process_normal_finish.ini
testing/mozbase/mozprocess/tests/process_waittimeout_python.ini -> testing/mozbase/mozprocess/tests/process_waittimeout.ini
testing/mozbase/mozprocess/tests/process_waittimeout_10s_python.ini -> testing/mozbase/mozprocess/tests/process_waittimeout_10s.ini
Since these copies were created on one side of some merge, it still
seems reasonable to include them, so I'm not even sure it's worse than
filelog pathcopies(), just different.
Differential Revision: https://phab.mercurial-scm.org/D6420
| author | Martin von Zweigbergk <martinvonz@google.com> |
| --- | --- |
| date | Thu, 18 Apr 2019 21:22:14 -0700 |
| parents | 1b0be75cb61f |
| children | a7f1a0b3f461 |
from __future__ import absolute_import
import unittest

try:
    from mercurial import rustext
    rustext.__name__  # trigger immediate actual import
except ImportError:
    rustext = None
else:
    # this would fail already without appropriate ancestor.__package__
    from mercurial.rustext.discovery import (
        PartialDiscovery,
    )

try:
    from mercurial.cext import parsers as cparsers
except ImportError:
    cparsers = None

# picked from test-parse-index2, copied rather than imported
# so that it stays stable even if test-parse-index2 changes or disappears.
data_non_inlined = (
    b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
    b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
    b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
    b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
    b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
    b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
    b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
    b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
    b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
    b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
    b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
    b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    )


@unittest.skipIf(rustext is None or cparsers is None,
                 "rustext or the C Extension parsers module "
                 "discovery relies on is not available")
class rustdiscoverytest(unittest.TestCase):
    """Test the correctness of binding to Rust code.

    This test is merely for the binding to Rust itself: extraction
    of Python variable, giving back the results etc.

    It is not meant to test the algorithmic correctness of the provided
    methods. Hence the very simple embedded index data is good enough.

    Algorithmic correctness is asserted by the Rust unit tests.
    """

    def parseindex(self):
        return cparsers.parse_index2(data_non_inlined, False)[0]

    def testindex(self):
        idx = self.parseindex()
        # checking our assumptions about the index binary data:
        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
                         {0: (-1, -1),
                          1: (0, -1),
                          2: (1, -1),
                          3: (2, -1)})

    def testaddcommonsmissings(self):
        idx = self.parseindex()
        disco = PartialDiscovery(idx, [3])
        self.assertFalse(disco.hasinfo())
        self.assertFalse(disco.iscomplete())

        disco.addcommons([1])
        self.assertTrue(disco.hasinfo())
        self.assertFalse(disco.iscomplete())

        disco.addmissings([2])
        self.assertTrue(disco.hasinfo())
        self.assertTrue(disco.iscomplete())

        self.assertEqual(disco.commonheads(), {1})

    def testaddmissingsstats(self):
        idx = self.parseindex()
        disco = PartialDiscovery(idx, [3])
        self.assertIsNone(disco.stats()['undecided'], None)

        disco.addmissings([2])
        self.assertEqual(disco.stats()['undecided'], 2)

    def testaddinfocommonfirst(self):
        idx = self.parseindex()
        disco = PartialDiscovery(idx, [3])
        disco.addinfo([(1, True), (2, False)])
        self.assertTrue(disco.hasinfo())
        self.assertTrue(disco.iscomplete())
        self.assertEqual(disco.commonheads(), {1})

    def testaddinfomissingfirst(self):
        idx = self.parseindex()
        disco = PartialDiscovery(idx, [3])
        disco.addinfo([(2, False), (1, True)])
        self.assertTrue(disco.hasinfo())
        self.assertTrue(disco.iscomplete())
        self.assertEqual(disco.commonheads(), {1})


if __name__ == '__main__':
    import silenttestrunner
    silenttestrunner.main(__name__)