view hgext/sqlitestore.py @ 45962:a66568f20ddc


# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  ``hg -R <bundle-file> log`` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
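
For example, the following configuration enables the extension and makes
newly created repositories use SQLite for storage::

  [extensions]
  sqlitestore =

  [storage]
  new-repo-backend = sqlite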
"""

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import attr
from mercurial import (
    ancestor,
    dagop,
    encoding,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    requirements,
    util,
    verify,
)
from mercurial.interfaces import (
    repository,
    util as interfaceutil,
)
from mercurial.utils import (
    hashutil,
    storageutil,
)

try:
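    # Touch an attribute so a demand-loaded or otherwise broken zstd module
    # fails here with ImportError instead of at first use.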
    from mercurial import zstd

    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem(
    b'storage',
    b'sqlite.compression',
    default=b'zstd' if zstd else b'zlib',
    experimental=True,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.
    'CREATE TABLE delta ('
    '    id INTEGER PRIMARY KEY, '
    '    compression INTEGER NOT NULL, '
    '    hash BLOB UNIQUE ON CONFLICT ABORT, '
    '    delta BLOB NOT NULL '
    ')',
    # Tracked paths are normalized to integer ids to avoid redundant
    # storage of the path name.
    'CREATE TABLE filepath ('
    '    id INTEGER PRIMARY KEY, '
    '    path BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) pair and a
    # (path, node) pair.
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
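    #
    # For example (hypothetical row ids), a chain such as
    #
    #   id=3 (deltabaseid=2) -> id=2 (deltabaseid=1) -> id=1 (deltabaseid=NULL)
    #
    # is resolved by taking the delta stored for row 1 (a full text, since it
    # has no base) and applying the deltas for rows 2 and 3 on top of it.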
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    'CREATE TABLE fileindex ('
    '    id INTEGER PRIMARY KEY, '
    '    pathid INTEGER REFERENCES filepath(id), '
    '    revnum INTEGER NOT NULL, '
    '    p1rev INTEGER NOT NULL, '
    '    p2rev INTEGER NOT NULL, '
    '    linkrev INTEGER NOT NULL, '
    '    flags INTEGER NOT NULL, '
    '    deltaid INTEGER REFERENCES delta(id), '
    '    deltabaseid INTEGER REFERENCES fileindex(id), '
    '    node BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX fileindex_pathrevnum '
    '    ON fileindex (pathid, revnum)',
    'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
    # Provide a view over all file data for convenience.
    'CREATE VIEW filedata AS '
    'SELECT '
    '    fileindex.id AS id, '
    '    filepath.id AS pathid, '
    '    filepath.path AS path, '
    '    fileindex.revnum AS revnum, '
    '    fileindex.node AS node, '
    '    fileindex.p1rev AS p1rev, '
    '    fileindex.p2rev AS p2rev, '
    '    fileindex.linkrev AS linkrev, '
    '    fileindex.flags AS flags, '
    '    fileindex.deltaid AS deltaid, '
    '    fileindex.deltabaseid AS deltabaseid '
    'FROM filepath, fileindex '
    'WHERE fileindex.pathid=filepath.id',
    'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]


def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        'WITH RECURSIVE '
        '    deltachain(deltaid, baseid) AS ('
        '        SELECT deltaid, deltabaseid FROM fileindex '
        '            WHERE pathid=? AND node=? '
        '        UNION ALL '
        '        SELECT fileindex.deltaid, deltabaseid '
        '            FROM fileindex, deltachain '
        '            WHERE '
        '                fileindex.id=deltachain.baseid '
        '                AND deltachain.baseid IS NOT NULL '
        '                AND fileindex.id NOT IN ({stops}) '
        '    ) '
        'SELECT deltachain.baseid, compression, delta '
        'FROM deltachain, delta '
        'WHERE delta.id=deltachain.deltaid'.format(
            stops=','.join(['?'] * len(stoprids))
        ),
        tuple([pathid, node] + list(stoprids.keys())),
    )

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError(
                b'unhandled compression type: %d' % compression
            )

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext


def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
            (compression, hash, delta),
        ).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            'SELECT id FROM delta WHERE hash=?', (hash,)
        ).fetchone()[0]


class SQLiteStoreError(error.StorageError):
    pass


@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == b'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(
            self._db.execute(
                'SELECT id FROM filepath WHERE path=?', (self._path,)
            )
        )

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            'FROM fileindex '
            'WHERE pathid=? '
            'ORDER BY revnum ASC',
            (self._pathid,),
        )

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(
                    _(b'sqlite database has inconsistent revision numbers')
                )

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags,
            )

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(
            len(self._revisions), start=start, stop=stop
        )

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _(b'no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _(b'no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            'SELECT'
            '  node '
            '  FROM filedata '
            '  WHERE path=? AND (p1rev=? OR p2rev=?) '
            '  ORDER BY revnum ASC',
            (self._path, rev, rev),
        )

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _(b'no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n for n in self._revisioncache}

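        # If nothing is cached, use a sentinel row id that cannot match any
        # real fileindex row so the recursive query walks all the way to the
        # root of the delta chain.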
        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(
            self._db,
            self._pathid,
            node,
            self._revisioncache,
            stoprids,
            zstddctx=self._dctx,
        )

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def rawdata(self, *args, **kwargs):
        return self.revision(*args, **kwargs)

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            'SELECT revnum, deltaid FROM fileindex '
            'WHERE pathid=? '
            '    AND node in (%s)' % (','.join(['?'] * len(nodes))),
            tuple([self._pathid] + nodes),
        )

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid),
            )
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
        ):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        if flags:
            raise SQLiteStoreError(_(b'flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(
            node, revisiondata, transaction, linkrev, p1, p2
        )

        self._revisioncache[node] = revisiondata
        return node

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        empty = True

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError(b'unhandled revision flag')

            if maybemissingparents:
                if p1 != nullid and not self.hasnode(p1):
                    p1 = nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != nullid and not self.hasnode(p2):
                    p2 = nullid
                    storeflags |= FLAG_MISSING_P2

            baserev = self.rev(deltabase)

            # If the base is censored, the delta must be a full replacement
            # in a single patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize(b'>lll')
                oldlen = len(self.rawdata(deltabase, _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path, deltabase)

            if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
                delta, baserev, lambda x: len(self.rawdata(x))
            ):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid),
                    )

                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p2], entry.flags, entry.rid),
                    )

                if duplicaterevisioncb:
                    duplicaterevisioncb(self, node)
                empty = False
                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(
                node,
                text,
                transaction,
                linkrev,
                p1,
                p2,
                storedelta=storedelta,
                flags=storeflags,
            )

            if addrevisioncb:
                addrevisioncb(self, node)
            empty = False

        return not empty

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.rawdata(censornode)):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,),
        ).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(
            self._db.execute(
                'SELECT id, pathid, node FROM fileindex '
                'WHERE deltabaseid=? OR deltaid=?',
                (censoreddeltaid, censoreddeltaid),
            )
        )

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(
                self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
            )

            deltahash = hashutil.sha1(fulltext).digest()

            if self._compengine == b'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == b'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == b'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError(
                    b'unhandled compression engine: %s' % self._compengine
                )

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                'WHERE id=?',
                (deltaid, rid),
            )

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashutil.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(
            self._db, COMPRESSION_NONE, deltahash, tombstone
        )

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode),
        )

        self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            [self.rev(n) for n in self.heads()],
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)),
            )

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(
                len(self.revision(node)) for node in self._nodetorev
            )

        if storedsize:
            # TODO implement this?
            d[b'storedsize'] = None

        return d

    def verifyintegrity(self, state):
        state[b'skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_(b'unpacking %s: %s') % (short(node), e), node=node
                )

                state[b'skipread'].add(node)

    # End of ifilestorage interface.

    def _checkhash(self, fulltext, node, p1=None, p2=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)

        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
            return

        try:
            del self._revisioncache[node]
        except KeyError:
            pass

        if storageutil.iscensoredtext(fulltext):
            raise error.CensoredNodeError(self._path, node, fulltext)

        raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)

    def _addrawrevision(
        self,
        node,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        storedelta=None,
        flags=0,
    ):
        if self._pathid is None:
            res = self._db.execute(
                'INSERT INTO filepath (path) VALUES (?)', (self._path,)
            )
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

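            # A delta against the null revision is simply the full revision
            # text.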
            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(
                    self.revision(self.rev(deltabase)), revisiondata
                )

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The hash column is UNIQUE (ON CONFLICT ABORT)
        # and insertdelta() handles a conflict by looking up the existing
        # row, which is faster than checking for a duplicate before inserting.
        deltahash = hashutil.sha1(delta).digest()

        if self._compengine == b'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == b'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == b'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError(
                b'unhandled compression engine: %s' % self._compengine
            )

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            'INSERT INTO fileindex ('
            '    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            '    deltaid, deltabaseid) '
            '    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (
                self._pathid,
                rev,
                node,
                p1rev,
                p2rev,
                linkrev,
                flags,
                deltaid,
                baseid,
            ),
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags,
        )

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node


class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute('BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize(b'sqlitestore', committransaction)

        return tr

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join(b'db.sqlite'))
        self._db = (tid, db)

        return db


def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(encoding.strfromlocal(path))
    db.text_factory = bytes

    res = db.execute('PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_(b'sqlite database has unrecognized version'))

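    # Use write-ahead logging so readers are not blocked while a transaction
    # is writing.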
    db.execute('PRAGMA journal_mode=WAL')

    return db


def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
    supported.add(REQUIREMENT_SHALLOW_FILES)
    supported.add(requirements.NARROW_REQUIREMENT)


def newreporequirements(orig, ui, createopts):
    if createopts[b'backend'] != b'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if b'sharedrepo' in createopts:
        raise error.Abort(
            _(b'shared repositories not supported with SQLite store')
        )

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        b'narrowfiles',
        b'backend',
        b'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(
            _(b'SQLite store does not support repo creation option: %s')
            % b', '.join(sorted(unsupported))
        )

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to the revlogv1 backend's storage requirements and then add our own
    # requirement on top.
    createopts[b'backend'] = b'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config(b'storage', b'sqlite.compression')

    if compression == b'zstd' and not zstd:
        raise error.Abort(
            _(
                b'storage.sqlite.compression set to "zstd" but '
                b'zstandard compression not available to this '
                b'Mercurial install'
            )
        )

    if compression == b'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == b'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == b'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(
            _(
                b'unknown compression engine defined in '
                b'storage.sqlite.compression: %s'
            )
            % compression
        )

    if createopts.get(b'shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = b'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = b'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = b'none'
        else:
            raise error.Abort(
                _(
                    b'unable to determine what compression engine '
                    b'to use for SQLite storage'
                )
            )

        return sqlitefilestore(self._dbconn, path, compression)


def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        if REQUIREMENT_SHALLOW_FILES in requirements:
            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

        return sqlitefilestorage
    else:
        return orig(requirements=requirements, features=features, **kwargs)


def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(
                _(
                    b'repository uses zstandard compression, which '
                    b'is not available to this Mercurial install'
                )
            )

        return sqliterepository

    return orig(requirements=requirements, **kwargs)


def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False


def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(
        localrepo, b'newreporequirements', newreporequirements
    )
    extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
    extensions.wrapfunction(localrepo, b'makemain', makemain)
    extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)


def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?