diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c16a09e921..c8a3b4128d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -105,6 +105,8 @@ jobs: args: --all-targets --all-features - command: check args: --all-targets + - command: doc + args: --all-features --workspace - command: make args: check --locked - command: test diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b62bdb680..68ad5bf651 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,18 +8,124 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Description of the upcoming release here. - ### Changed - [#1591](https://github.com/FuelLabs/fuel-core/pull/1591): Simplify libp2p dependencies and not depend on all sub modules directly. - [#1585](https://github.com/FuelLabs/fuel-core/pull/1585): Let `NetworkBehaviour` macro generate `FuelBehaviorEvent` in p2p +- [#1579](https://github.com/FuelLabs/fuel-core/pull/1579): The change extracts the off-chain-related logic from the executor and moves it to the GraphQL off-chain worker. It creates two new concepts - Off-chain and On-chain databases where the GraphQL worker has exclusive ownership of the database and may modify it without intersecting with the On-chain database. - [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of sealed blocks into the `BlockImporter` instead of the executor. +- [#1601](https://github.com/FuelLabs/fuel-core/pull/1601): Fix formatting in docs and check that `cargo doc` passes in the CI. #### Breaking - [#1596](https://github.com/FuelLabs/fuel-core/pull/1596) Make `Consensus` type a version-able enum - [#1593](https://github.com/FuelLabs/fuel-core/pull/1593) Make `Block` type a version-able enum +- [#1576](https://github.com/FuelLabs/fuel-core/pull/1576): The change moves the implementation of the storage traits for required tables from `fuel-core` to `fuel-core-storage` crate. 
The change also adds a more flexible configuration of the encoding/decoding per the table and allows the implementation of specific behaviors for the table in a much easier way. It unifies the encoding between database, SMTs, and iteration, preventing mismatched byte representations at the Rust type system level. Plus, it increases code reuse by applying the same blueprint to other tables. + + It is a breaking PR because it changes database encoding/decoding for some tables. + + ### StructuredStorage + + The change adds a new type `StructuredStorage`. It is a wrapper around the key-value storage that implements the storage traits (`StorageInspect`, `StorageMutate`, `StorageRead`, etc.) for tables with a blueprint. This blueprint works in tandem with the `TableWithBlueprint` trait. The table may implement `TableWithBlueprint` specifying the blueprint, as an example: + + ```rust + impl TableWithBlueprint for ContractsRawCode { + type Blueprint = Plain<Raw, Raw>; + + fn column() -> Column { + Column::ContractsRawCode + } + } + ``` + + It is a definition of the blueprint for the `ContractsRawCode` table. It has a plain blueprint, meaning it simply encodes/decodes bytes and stores/loads them into/from the storage. As a key codec and value codec, it uses a `Raw` encoding/decoding that simplifies writing bytes and loads them back into memory without applying any serialization or deserialization algorithm. + + If the table implements `TableWithBlueprint` and the selected codec satisfies all blueprint requirements, the corresponding storage traits for that table are implemented on the `StructuredStorage` type. + + ### Codecs + + Each blueprint allows customizing the key and value codecs. It allows the use of different codecs for different tables, taking into account the complexity and weight of the data and providing a way to implement them more optimally. + + That property may be very useful for performing migrations in an easier way. 
Plus, the migration can also be `no_std`, potentially allowing fraud proving. + + An example of migration: + + ```rust + /// Define the table for V1 value encoding/decoding. + impl TableWithBlueprint for ContractsRawCodeV1 { + type Blueprint = Plain<Raw, Raw>; + + fn column() -> Column { + Column::ContractsRawCode + } + } + + /// Define the table for V2 value encoding/decoding. + /// It uses `Postcard` codec for the value instead of `Raw` codec. + /// + /// # Dev-note: The column is the same. + impl TableWithBlueprint for ContractsRawCodeV2 { + type Blueprint = Plain<Raw, Postcard>; + + fn column() -> Column { + Column::ContractsRawCode + } + } + + fn migration(storage: &mut Database) { + let mut iter = storage.iter_all::<ContractsRawCodeV1>(None); + while let Ok((key, value)) = iter.next() { + // Insert into the same table but with another codec. + storage.storage::<ContractsRawCodeV2>().insert(key, value); + } + } + ``` + + ### Structures + + The blueprint of the table defines its behavior. As an example, a `Plain` blueprint simply encodes/decodes bytes and stores/loads them into/from the storage. The `SMT` blueprint builds a sparse Merkle tree on top of the key-value pairs. + + By implementing a blueprint once, we can apply it to any table that satisfies the requirements of this blueprint. It increases code reuse and minimizes duplication. + + It can be useful if we decide to create global roots for all required tables that are used in fraud proving. + + ```rust + impl TableWithBlueprint for SpentMessages { + type Blueprint = Plain; + + fn column() -> Column { + Column::SpentMessages + } + } + | + | + \|/ + + impl TableWithBlueprint for SpentMessages { + type Blueprint = + Sparse; + + fn column() -> Column { + Column::SpentMessages + } + } + ``` + + ### Side changes + + #### `iter_all` + The `iter_all` functionality now accepts the table instead of `K` and `V` generics. It is done to use the correct codec during deserialization. Also, the table definition provides the column. 
+ + #### Duplicated unit tests + + The `fuel-core-storage` crate provides macros that generate unit tests. Almost all tables had the same test like `get`, `insert`, `remove`, `exist`. All duplicated tests were moved to macros. The unique one still stays at the same place where it was before. + + #### `StorageBatchMutate` + + Added a new `StorageBatchMutate` trait that we can move to `fuel-storage` crate later. It allows batch operations on the storage. It may be more performant in some cases. + - [#1573](https://github.com/FuelLabs/fuel-core/pull/1573): Remove nested p2p request/response encoding. Only breaks p2p networking compatibility with older fuel-core versions, but is otherwise fully internal. + ## [Version 0.22.0] ### Added diff --git a/Cargo.lock b/Cargo.lock index 8c69a2ab47..c7b103f774 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -103,9 +103,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.5" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" +checksum = "4cd2405b3ac1faab2990b74d728624cd9fd115651fcecc7c2d8daf01376275ba" dependencies = [ "anstyle", "anstyle-parse", @@ -229,9 +229,9 @@ checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] name = "assert_cmd" -version = "2.0.12" +version = "2.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88903cb14723e4d4003335bb7f8a14f27691649105346a0f0957466c096adfe6" +checksum = "00ad3f3a942eee60335ab4342358c161ee296829e0d16ff42fc1d6cb07815467" dependencies = [ "anstyle", "bstr", @@ -278,7 +278,7 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" dependencies = [ - "async-lock 3.2.0", + "async-lock 3.3.0", 
"async-task", "concurrent-queue", "fastrand 2.0.1", @@ -307,7 +307,7 @@ dependencies = [ "async-channel 2.1.1", "async-executor", "async-io 2.2.2", - "async-lock 3.2.0", + "async-lock 3.3.0", "blocking", "futures-lite 2.2.0", "once_cell", @@ -412,14 +412,14 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" dependencies = [ - "async-lock 3.2.0", + "async-lock 3.3.0", "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.2.0", "parking", "polling 3.3.1", - "rustix 0.38.28", + "rustix 0.38.30", "slab", "tracing", "windows-sys 0.52.0", @@ -436,9 +436,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ "event-listener 4.0.3", "event-listener-strategy", @@ -469,7 +469,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.28", + "rustix 0.38.30", "windows-sys 0.48.0", ] @@ -485,7 +485,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.28", + "rustix 0.38.30", "signal-hook-registry", "slab", "windows-sys 0.48.0", @@ -731,9 +731,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -844,7 +844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ "async-channel 2.1.1", - "async-lock 3.2.0", + "async-lock 3.3.0", "async-task", "fastrand 2.0.1", "futures-io", @@ -1086,9 +1086,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.13" +version = "4.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52bdc885e4cacc7f7c9eedc1ef6da641603180c783c41a15c264944deeaab642" +checksum = "58e54881c004cec7895b0068a0a954cd5d62da01aef83fa35b1e594497bf5445" dependencies = [ "clap_builder", "clap_derive 4.4.7", @@ -1096,9 +1096,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.12" +version = "4.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb7fb5e4e979aec3be7791562fcba452f94ad85e954da024396433e0e25a79e9" +checksum = "59cb82d7f531603d2fd1f507441cdd35184fa81beff7bd489570de7f773460bb" dependencies = [ "anstream", "anstyle", @@ -1190,7 +1190,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bech32", "bs58", "digest 0.10.7", @@ -1234,14 +1234,14 @@ dependencies = [ [[package]] name = "console" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", "lazy_static", "libc", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] @@ -1393,7 +1393,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.13", + "clap 4.4.16", "criterion-plot", "futures", "is-terminal", @@ -1430,44 +1430,37 @@ checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" [[package]] name = 
"crossbeam-channel" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.17" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.18" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crossterm" @@ -2037,7 +2030,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "hex", "k256", @@ -2185,9 +2178,9 @@ dependencies = [ [[package]] name = "ethers-addressbook" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c405f24ea3a517899ba7985385c43dc4a7eb1209af3b1e0a1a32d7dcc7f8d09" +checksum = "9bf35eb7d2e2092ad41f584951e08ec7c077b142dba29c4f1b8f52d2efddc49c" dependencies = [ "ethers-core", "once_cell", @@ -2216,9 +2209,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51258120c6b47ea9d9bec0d90f9e8af71c977fbefbef8213c91bfed385fe45eb" +checksum = "bbdfb952aafd385b31d316ed80d7b76215ce09743c172966d840e96924427e0c" dependencies = [ "Inflector", "const-hex", @@ -2234,15 +2227,15 @@ dependencies = [ "serde", "serde_json", "syn 2.0.48", - "toml 0.8.2", + "toml 0.8.8", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936e7a0f1197cee2b62dc89f63eff3201dbf87c283ff7e18d86d38f83b845483" +checksum = "7465c814a2ecd0de0442160da13584205d1cdc08f4717a6511cad455bd5d7dc4" dependencies = [ "Inflector", "const-hex", @@ -2256,9 +2249,9 @@ dependencies = [ [[package]] name = "ethers-core" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f03e0bdc216eeb9e355b90cf610ef6c5bb8aca631f97b5ae9980ce34ea7878d" +checksum = "918b1a9ba585ea61022647def2f27c29ba19f6d2a4a4c8f68a9ae97fd5769737" dependencies = [ "arrayvec", "bytes", @@ -2286,9 +2279,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abbac2c890bdbe0f1b8e549a53b00e2c4c1de86bb077c1094d1f38cdf9381a56" +checksum = "facabf8551b4d1a3c08cb935e7fca187804b6c2525cc0dafb8e5a6dd453a24de" dependencies = [ "chrono", "ethers-core", @@ -2335,7 +2328,7 @@ checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" dependencies = [ "async-trait", "auto_impl", - "base64 
0.21.5", + "base64 0.21.7", "bytes", "const-hex", "enr", @@ -2386,9 +2379,9 @@ dependencies = [ [[package]] name = "ethers-solc" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64f710586d147864cff66540a6d64518b9ff37d73ef827fee430538265b595f" +checksum = "cc2e46e3ec8ef0c986145901fa9864205dc4dcee701f9846be2d56112d34bdea" dependencies = [ "cfg-if", "const-hex", @@ -2617,7 +2610,7 @@ dependencies = [ "async-graphql", "async-trait", "axum", - "clap 4.4.13", + "clap 4.4.16", "derive_more", "enum-iterator", "fuel-core-chain-config", @@ -2641,14 +2634,12 @@ dependencies = [ "hyper", "itertools 0.10.5", "mockall", - "postcard", "proptest", "rand", "rocksdb", - "serde", "serde_json", - "strum 0.24.1", - "strum_macros 0.24.3", + "strum 0.25.0", + "strum_macros 0.25.3", "tempfile", "test-case", "test-strategy", @@ -2666,7 +2657,7 @@ version = "0.0.0" dependencies = [ "anyhow", "async-trait", - "clap 4.4.13", + "clap 4.4.16", "criterion", "ctrlc", "ed25519-dalek", @@ -2700,7 +2691,7 @@ name = "fuel-core-bin" version = "0.22.0" dependencies = [ "anyhow", - "clap 4.4.13", + "clap 4.4.16", "const_format", "dirs 4.0.0", "dotenvy", @@ -2767,7 +2758,7 @@ dependencies = [ name = "fuel-core-client-bin" version = "0.22.0" dependencies = [ - "clap 4.4.13", + "clap 4.4.16", "fuel-core-client", "fuel-core-types", "serde_json", @@ -2850,6 +2841,7 @@ dependencies = [ "mockall", "test-case", "tokio", + "tokio-rayon", "tracing", ] @@ -2858,7 +2850,7 @@ name = "fuel-core-keygen" version = "0.22.0" dependencies = [ "anyhow", - "clap 4.4.13", + "clap 4.4.16", "fuel-core-types", "libp2p-identity", "serde", @@ -2870,7 +2862,7 @@ version = "0.22.0" dependencies = [ "anyhow", "atty", - "clap 4.4.13", + "clap 4.4.16", "crossterm", "fuel-core-keygen", "serde_json", @@ -2980,6 +2972,7 @@ dependencies = [ "mockall", "once_cell", "parking_lot", + "rand", "serde", "serde_json", "test-case", @@ -3009,10 +3002,20 @@ version = 
"0.22.0" dependencies = [ "anyhow", "derive_more", + "enum-iterator", + "fuel-core-storage", "fuel-core-types", "fuel-vm", + "impl-tools", + "itertools 0.10.5", "mockall", + "paste", + "postcard", "primitive-types", + "rand", + "serde", + "strum 0.25.0", + "strum_macros 0.25.3", ] [[package]] @@ -3109,8 +3112,10 @@ version = "0.22.0" dependencies = [ "anyhow", "bs58", + "derivative", "derive_more", "fuel-vm", + "rand", "secrecy", "serde", "tai64", @@ -3436,9 +3441,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -3502,9 +3507,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +checksum = "b553656127a00601c8ae5590fcfdc118e4083a7924b6cf4ffc1ea4b99dc429d7" dependencies = [ "bytes", "fnv", @@ -3929,9 +3934,9 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", "attohttpc", @@ -3973,6 +3978,30 @@ dependencies = [ "serde", ] +[[package]] +name = "impl-tools" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82c305b1081f1a99fda262883c788e50ab57d36c00830bdd7e0a82894ad965c" +dependencies = [ + "autocfg", + "impl-tools-lib", + "proc-macro-error", + "syn 2.0.48", +] + +[[package]] +name = "impl-tools-lib" +version = "0.10.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85d3946d886eaab0702fa0c6585adcced581513223fa9df7ccfabbd9fa331a88" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" @@ -4084,7 +4113,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi 0.3.3", - "rustix 0.38.28", + "rustix 0.38.30", "windows-sys 0.52.0", ] @@ -4123,9 +4152,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -4142,7 +4171,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "pem 1.1.1", "ring 0.16.20", "serde", @@ -4152,9 +4181,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", "ecdsa", @@ -4166,9 +4195,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -4224,9 
+4253,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libflate" @@ -4375,7 +4404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d665144a616dadebdc5fff186b1233488cdcd8bfb1223218ff084b6d052c94f7" dependencies = [ "asynchronous-codec 0.7.0", - "base64 0.21.5", + "base64 0.21.7", "byteorder", "bytes", "either", @@ -4850,16 +4879,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" dependencies = [ - "clap 4.4.13", + "clap 4.4.16", "termcolor", "threadpool", ] [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" dependencies = [ "cc", "pkg-config", @@ -5306,20 +5335,20 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" +checksum = 
"681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.0.0", "proc-macro2", "quote", "syn 2.0.48", @@ -5448,7 +5477,7 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5549,7 +5578,7 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "serde", ] @@ -5759,7 +5788,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.28", + "rustix 0.38.30", "tracing", "windows-sys 0.52.0", ] @@ -5924,12 +5953,20 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.20.7", +] + +[[package]] +name = "proc-macro-crate" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b2685dd208a3771337d8d386a89840f0f43cd68be8dae90a5f8c2384effc9cd" +dependencies = [ + "toml_edit 0.21.0", ] [[package]] @@ -6358,7 +6395,7 @@ version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "cookie", "cookie_store", @@ -6583,9 +6620,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.30" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ "bitflags 2.4.1", "errno", @@ -6649,7 +6686,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -7133,9 +7170,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e" [[package]] name = "smol" @@ -7489,7 +7526,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.1", "redox_syscall", - "rustix 0.38.28", + "rustix 0.38.30", "windows-sys 0.52.0", ] @@ -7506,9 +7543,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -7833,21 +7870,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -7865,9 +7902,20 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ "indexmap 2.1.0", "serde", @@ -8210,9 +8258,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.4.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ce5bb364b23e66b528d03168df78b38c0f7b6fe17386928f29d5ab2e7cb2f7" +checksum = "7cdbaf5e132e593e9fc1de6a15bbec912395b11fb9719e061cf64f804524c503" [[package]] name = "vcpkg" @@ -8274,9 +8322,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8284,9 +8332,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", @@ -8299,9 +8347,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" dependencies = [ "cfg-if", "js-sys", @@ -8311,9 +8359,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8321,9 +8369,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ "proc-macro2", "quote", @@ -8334,15 +8382,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = 
"58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -8420,15 +8468,6 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" @@ -8447,21 +8486,6 @@ dependencies = [ "windows-targets 0.52.0", ] -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-targets" version = "0.48.5" @@ -8492,12 +8516,6 @@ dependencies = [ "windows_x86_64_msvc 0.52.0", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -8510,12 +8528,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -8528,12 +8540,6 @@ version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -8546,12 +8552,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -8564,12 +8564,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -8582,12 +8576,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -8600,12 +8588,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -8620,9 +8602,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.33" +version = "0.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7520bbdec7211caa7c4e682eb1fbe07abe20cee6756b6e00f537c82c11816aa" +checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" dependencies = [ "memchr", ] @@ -8713,7 +8695,7 @@ dependencies = [ name = "xtask" version = "0.0.0" dependencies = [ - "clap 4.4.13", + "clap 4.4.16", "fuel-core", ] diff --git a/Cargo.toml b/Cargo.toml index 94aafb99c4..1b5d4df908 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ async-trait = "0.1" cynic = { version = "2.2.1", features = ["http-reqwest"] } clap = "4.1" derive_more = { version = "0.99" } +enum-iterator = "1.2" hyper = { version = "0.14.26" } primitive-types = { version = "0.12", default-features = false } rand = "0.8" @@ -100,6 +101,8 @@ tracing-attributes = "0.1" tracing-subscriber = "0.3" serde = "1.0" serde_json = "1.0" +strum = "0.25" +strum_macros = "0.25" # enable cookie store to support L7 sticky sessions reqwest = { version = "0.11.16", default-features = false, features = ["rustls-tls", "cookies"] } mockall = "0.11" diff --git a/bin/e2e-test-client/src/config.rs b/bin/e2e-test-client/src/config.rs index 456bd34b9b..9d53704335 100644 --- a/bin/e2e-test-client/src/config.rs +++ b/bin/e2e-test-client/src/config.rs @@ -17,7 +17,7 @@ pub struct SuiteConfig { /// The primary endpoint to connect to pub endpoint: String, /// Max timeout for syncing between wallets - /// Default is 
[`SYNC_TIMEOUT`](crate::SYNC_TIMEOUT) + /// Default is [`SYNC_TIMEOUT`] #[serde(with = "humantime_serde")] pub wallet_sync_timeout: Duration, /// Enable slower but more stressful tests. Should be used in full E2E tests but not in CI. diff --git a/ci_checks.sh b/ci_checks.sh index d1ffaa75e0..b78fae2781 100755 --- a/ci_checks.sh +++ b/ci_checks.sh @@ -11,6 +11,7 @@ cargo +nightly fmt --all -- --check && cargo sort -w --check && source .github/workflows/scripts/verify_openssl.sh && cargo clippy --all-targets --all-features && +cargo doc --all-features --workspace && cargo make check --locked && cargo make check --all-features --locked && cargo check -p fuel-core-types --target wasm32-unknown-unknown --no-default-features && diff --git a/crates/chain-config/Cargo.toml b/crates/chain-config/Cargo.toml index 4fc9d777c1..d5b89a84be 100644 --- a/crates/chain-config/Cargo.toml +++ b/crates/chain-config/Cargo.toml @@ -17,7 +17,7 @@ fuel-core-storage = { workspace = true } fuel-core-types = { workspace = true, default-features = false, features = ["serde"] } hex = { version = "0.4", features = ["serde"] } itertools = { workspace = true } -postcard = { version = "1.0", features = ["alloc"] } +postcard = { workspace = true, features = ["alloc"] } rand = { workspace = true, optional = true } serde = { workspace = true, features = ["derive", "rc"] } serde_json = { version = "1.0", features = ["raw_value"], optional = true } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index db8c590257..7a54e142f0 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -19,7 +19,7 @@ async-trait = { workspace = true } axum = { workspace = true } clap = { workspace = true, features = ["derive"] } derive_more = { version = "0.99" } -enum-iterator = "1.2" +enum-iterator = { workspace = true } fuel-core-chain-config = { workspace = true } fuel-core-consensus-module = { workspace = true } fuel-core-database = { workspace = true } @@ -41,16 +41,14 @@ 
futures = { workspace = true } hex = { version = "0.4", features = ["serde"] } hyper = { workspace = true } itertools = { workspace = true } -postcard = { workspace = true, features = ["use-std"] } rand = { workspace = true } rocksdb = { version = "0.21", default-features = false, features = [ "lz4", "multi-threaded-cf", ], optional = true } -serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } -strum = "0.24" -strum_macros = "0.24" +strum = { workspace = true } +strum_macros = { workspace = true } tempfile = { workspace = true, optional = true } thiserror = "1.0" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/crates/fuel-core/src/coins_query.rs b/crates/fuel-core/src/coins_query.rs index 254e5f1b7f..9c41fd0605 100644 --- a/crates/fuel-core/src/coins_query.rs +++ b/crates/fuel-core/src/coins_query.rs @@ -1,5 +1,5 @@ use crate::{ - fuel_core_graphql_api::service::Database, + fuel_core_graphql_api::database::ReadView, query::asset_query::{ AssetQuery, AssetSpendTarget, @@ -89,13 +89,13 @@ impl SpendQuery { }) } - /// Return [`Asset`]s. + /// Return `Asset`s. pub fn assets(&self) -> &Vec { &self.query_per_asset } /// Return [`AssetQuery`]s. 
- pub fn asset_queries<'a>(&'a self, db: &'a Database) -> Vec> { + pub fn asset_queries<'a>(&'a self, db: &'a ReadView) -> Vec> { self.query_per_asset .iter() .map(|asset| { @@ -159,7 +159,7 @@ pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryErro // An implementation of the method described on: https://iohk.io/en/blog/posts/2018/07/03/self-organisation-in-coin-selection/ pub fn random_improve( - db: &Database, + db: &ReadView, spend_query: &SpendQuery, ) -> Result>, CoinsQueryError> { let mut coins_per_asset = vec![]; @@ -229,7 +229,7 @@ mod tests { SpendQuery, }, database::Database, - fuel_core_graphql_api::service::Database as ServiceDatabase, + fuel_core_graphql_api::api_service::ReadDatabase as ServiceDatabase, query::asset_query::{ AssetQuery, AssetSpendTarget, @@ -323,15 +323,19 @@ mod tests { let result: Vec<_> = spend_query .iter() .map(|asset| { - largest_first(&AssetQuery::new(owner, asset, base_asset_id, None, db)) - .map(|coins| { - coins - .iter() - .map(|coin| { - (*coin.asset_id(base_asset_id), coin.amount()) - }) - .collect() - }) + largest_first(&AssetQuery::new( + owner, + asset, + base_asset_id, + None, + &db.view(), + )) + .map(|coins| { + coins + .iter() + .map(|coin| (*coin.asset_id(base_asset_id), coin.amount())) + .collect() + }) }) .try_collect()?; Ok(result) @@ -484,7 +488,7 @@ mod tests { db: &ServiceDatabase, ) -> Result, CoinsQueryError> { let coins = random_improve( - db, + &db.view(), &SpendQuery::new(owner, &query_per_asset, None, base_asset_id)?, ); @@ -682,7 +686,7 @@ mod tests { Some(excluded_ids), base_asset_id, )?; - let coins = random_improve(&db.service_database(), &spend_query); + let coins = random_improve(&db.service_database().view(), &spend_query); // Transform result for convenience coins.map(|coins| { @@ -840,7 +844,7 @@ mod tests { } let coins = random_improve( - &db.service_database(), + &db.service_database().view(), &SpendQuery::new( owner, &[AssetSpendTarget { @@ -930,7 +934,8 @@ mod tests { } fn 
service_database(&self) -> ServiceDatabase { - Box::new(self.database.clone()) + let database = self.database.clone(); + ServiceDatabase::new(database.clone(), database) } } @@ -980,18 +985,22 @@ mod tests { pub fn owned_coins(&self, owner: &Address) -> Vec { use crate::query::CoinQueryData; - let db = self.service_database(); - db.owned_coins_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| db.coin(id).unwrap())) + let query = self.service_database(); + let query = query.view(); + query + .owned_coins_ids(owner, None, IterDirection::Forward) + .map(|res| res.map(|id| query.coin(id).unwrap())) .try_collect() .unwrap() } pub fn owned_messages(&self, owner: &Address) -> Vec { use crate::query::MessageQueryData; - let db = self.service_database(); - db.owned_message_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| db.message(&id).unwrap())) + let query = self.service_database(); + let query = query.view(); + query + .owned_message_ids(owner, None, IterDirection::Forward) + .map(|res| res.map(|id| query.message(&id).unwrap())) .try_collect() .unwrap() } diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 29ace79dcd..8d4538b2d3 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -12,17 +12,25 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ + blueprint::Blueprint, + codec::Decode, iter::IterDirection, kv_store::{ - StorageColumn, + BatchOperations, + KeyValueStore, Value, WriteOperation, }, + structured_storage::{ + StructuredStorage, + TableWithBlueprint, + }, transactional::{ StorageTransaction, Transactional, }, Error as StorageError, + Mappable, Result as StorageResult, }; use fuel_core_types::{ @@ -34,11 +42,6 @@ use fuel_core_types::{ }, tai64::Tai64, }; -use itertools::Itertools; -use serde::{ - de::DeserializeOwned, - Serialize, -}; use std::{ fmt::{ self, @@ -46,10 +49,8 @@ use std::{ Formatter, }, marker::Send, - ops::Deref, 
sync::Arc, }; -use strum::EnumCount; pub use fuel_core_database::Error; pub type Result = core::result::Result; @@ -65,14 +66,9 @@ use std::path::Path; use tempfile::TempDir; // Storages implementation -// TODO: Move to separate `database/storage` folder, because it is only implementation of storages traits. mod block; -mod code_root; mod contracts; mod message; -mod receipts; -#[cfg(feature = "relayer")] -mod relayer; mod sealed_block; mod state; @@ -84,99 +80,11 @@ pub mod storage; pub mod transaction; pub mod transactions; -/// Database tables column ids to the corresponding [`fuel_core_storage::Mappable`] table. -#[repr(u32)] -#[derive( - Copy, - Clone, - Debug, - strum_macros::EnumCount, - strum_macros::IntoStaticStr, - PartialEq, - Eq, - enum_iterator::Sequence, -)] -pub enum Column { - /// The column id of metadata about the blockchain - Metadata = 0, - /// See [`ContractsRawCode`](fuel_core_storage::tables::ContractsRawCode) - ContractsRawCode = 1, - /// See [`ContractsInfo`](fuel_core_storage::tables::ContractsInfo) - ContractsInfo = 2, - /// See [`ContractsState`](fuel_core_storage::tables::ContractsState) - ContractsState = 3, - /// See [`ContractsLatestUtxo`](fuel_core_storage::tables::ContractsLatestUtxo) - ContractsLatestUtxo = 4, - /// See [`ContractsAssets`](fuel_core_storage::tables::ContractsAssets) - ContractsAssets = 5, - /// See [`Coins`](fuel_core_storage::tables::Coins) - Coins = 6, - /// The column of the table that stores `true` if `owner` owns `Coin` with `coin_id` - OwnedCoins = 7, - /// See [`Transactions`](fuel_core_storage::tables::Transactions) - Transactions = 8, - /// Transaction id to current status - TransactionStatus = 9, - /// The column of the table of all `owner`'s transactions - TransactionsByOwnerBlockIdx = 10, - /// See [`Receipts`](fuel_core_storage::tables::Receipts) - Receipts = 11, - /// See [`FuelBlocks`](fuel_core_storage::tables::FuelBlocks) - FuelBlocks = 12, - /// See 
[`FuelBlockSecondaryKeyBlockHeights`](storage::FuelBlockSecondaryKeyBlockHeights) - FuelBlockSecondaryKeyBlockHeights = 13, - /// See [`Messages`](fuel_core_storage::tables::Messages) - Messages = 14, - /// The column of the table that stores `true` if `owner` owns `Message` with `message_id` - OwnedMessageIds = 15, - /// See [`SealedBlockConsensus`](fuel_core_storage::tables::SealedBlockConsensus) - FuelBlockConsensus = 16, - /// See [`FuelBlockMerkleData`](storage::FuelBlockMerkleData) - FuelBlockMerkleData = 17, - /// See [`FuelBlockMerkleMetadata`](storage::FuelBlockMerkleMetadata) - FuelBlockMerkleMetadata = 18, - /// Messages that have been spent. - /// Existence of a key in this column means that the message has been spent. - /// See [`SpentMessages`](fuel_core_storage::tables::SpentMessages) - SpentMessages = 19, - /// Metadata for the relayer - /// See [`RelayerMetadata`](fuel_core_relayer::ports::RelayerMetadata) - RelayerMetadata = 20, - /// See [`ContractsAssetsMerkleData`](storage::ContractsAssetsMerkleData) - ContractsAssetsMerkleData = 21, - /// See [`ContractsAssetsMerkleMetadata`](storage::ContractsAssetsMerkleMetadata) - ContractsAssetsMerkleMetadata = 22, - /// See [`ContractsStateMerkleData`](storage::ContractsStateMerkleData) - ContractsStateMerkleData = 23, - /// See [`ContractsStateMerkleMetadata`](storage::ContractsStateMerkleMetadata) - ContractsStateMerkleMetadata = 24, - /// See [`ProcessedTransactions`](storage::ProcessedTransactions) - ProcessedTransactions = 25, -} - -impl Column { - /// The total count of variants in the enum. - pub const COUNT: usize = ::COUNT; - - /// Returns the `usize` representation of the `Column`. 
- pub fn as_usize(&self) -> usize { - *self as usize - } -} - -impl StorageColumn for Column { - fn name(&self) -> &'static str { - self.into() - } - - fn id(&self) -> u32 { - *self as u32 - } -} +pub type Column = fuel_core_storage::column::Column; #[derive(Clone, Debug)] pub struct Database { - data: DataSource, + data: StructuredStorage, // used for RAII _drop: Arc, } @@ -211,9 +119,12 @@ impl Drop for DropResources { } impl Database { - pub fn new(data_source: DataSource) -> Self { + pub fn new(data_source: D) -> Self + where + D: Into, + { Self { - data: data_source, + data: StructuredStorage::new(data_source.into()), _drop: Default::default(), } } @@ -229,14 +140,14 @@ impl Database { let db = RocksDb::default_open(path, capacity.into()).map_err(Into::::into).context("Failed to open rocksdb, you may need to wipe a pre-existing incompatible db `rm -rf ~/.fuel/db`")?; Ok(Database { - data: Arc::new(db), + data: StructuredStorage::new(Arc::new(db).into()), _drop: Default::default(), }) } pub fn in_memory() -> Self { Self { - data: Arc::new(MemoryStore::default()), + data: StructuredStorage::new(Arc::new(MemoryStore::default()).into()), _drop: Default::default(), } } @@ -246,7 +157,7 @@ impl Database { let tmp_dir = TempDir::new().unwrap(); let db = RocksDb::default_open(tmp_dir.path(), None).unwrap(); Self { - data: Arc::new(db), + data: StructuredStorage::new(Arc::new(db).into()), _drop: Arc::new( { move || { @@ -264,189 +175,152 @@ impl Database { } pub fn checkpoint(&self) -> DatabaseResult { - self.data.checkpoint() + self.data.as_ref().checkpoint() } pub fn flush(self) -> DatabaseResult<()> { - self.data.flush() + self.data.as_ref().flush() } } -/// Mutable methods. -// TODO: Add `&mut self` to them. 
-impl Database { - fn insert, V: Serialize + ?Sized, R: DeserializeOwned>( - &self, - key: K, - column: Column, - value: &V, - ) -> StorageResult> { - let result = self.data.replace( - key.as_ref(), - column, - Arc::new(postcard::to_stdvec(value).map_err(|_| StorageError::Codec)?), - )?; - if let Some(previous) = result { - Ok(Some( - postcard::from_bytes(&previous).map_err(|_| StorageError::Codec)?, - )) - } else { - Ok(None) - } +impl KeyValueStore for DataSource { + type Column = Column; + + fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { + self.as_ref().put(key, column, value) } - fn insert_raw, V: AsRef<[u8]>>( + fn replace( &self, - key: K, - column: Column, - value: V, + key: &[u8], + column: Self::Column, + value: Value, ) -> StorageResult> { - self.data - .replace(key.as_ref(), column, Arc::new(value.as_ref().to_vec())) + self.as_ref().replace(key, column, value) } - fn batch_insert, V: Serialize, S>( + fn write( &self, - column: Column, - set: S, - ) -> StorageResult<()> - where - S: Iterator, - { - let set: Vec<_> = set - .map(|(key, value)| { - let value = - postcard::to_stdvec(&value).map_err(|_| StorageError::Codec)?; - - let tuple = ( - key.as_ref().to_vec(), - column, - WriteOperation::Insert(Arc::new(value)), - ); - - Ok::<_, StorageError>(tuple) - }) - .try_collect()?; + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + self.as_ref().write(key, column, buf) + } - self.data.batch_write(&mut set.into_iter()) + fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { + self.as_ref().take(key, column) } - fn take( - &self, - key: &[u8], - column: Column, - ) -> StorageResult> { - self.data - .take(key, column)? 
- .map(|val| postcard::from_bytes(&val).map_err(|_| StorageError::Codec)) - .transpose() + fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { + self.as_ref().delete(key, column) } - fn take_raw(&self, key: &[u8], column: Column) -> StorageResult> { - self.data.take(key, column) + fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { + self.as_ref().exists(key, column) } -} -/// Read-only methods. -impl Database { - fn contains_key(&self, key: &[u8], column: Column) -> StorageResult { - self.data.exists(key, column) + fn size_of_value( + &self, + key: &[u8], + column: Self::Column, + ) -> StorageResult> { + self.as_ref().size_of_value(key, column) } - fn size_of_value(&self, key: &[u8], column: Column) -> StorageResult> { - self.data.size_of_value(key, column) + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + self.as_ref().get(key, column) } fn read( &self, key: &[u8], - column: Column, + column: Self::Column, buf: &mut [u8], ) -> StorageResult> { - self.data.read(key, column, buf) - } - - fn read_alloc(&self, key: &[u8], column: Column) -> StorageResult>> { - self.data - .get(key, column) - .map(|value| value.map(|value| value.deref().clone())) + self.as_ref().read(key, column, buf) } +} - fn get( +impl BatchOperations for DataSource { + fn batch_write( &self, - key: &[u8], - column: Column, - ) -> StorageResult> { - self.data - .get(key, column)? - .map(|val| postcard::from_bytes(&val).map_err(|_| StorageError::Codec)) - .transpose() + entries: &mut dyn Iterator, Self::Column, WriteOperation)>, + ) -> StorageResult<()> { + self.as_ref().batch_write(entries) } +} - fn iter_all( +/// Read-only methods. 
+impl Database { + fn iter_all( &self, - column: Column, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, { - self.iter_all_filtered::, Vec>(column, None, None, direction) + self.iter_all_filtered::, Vec>(None, None, direction) } - fn iter_all_by_prefix( + fn iter_all_by_prefix( &self, - column: Column, prefix: Option

, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, P: AsRef<[u8]>, { - self.iter_all_filtered::(column, prefix, None, None) + self.iter_all_filtered::(prefix, None, None) } - fn iter_all_by_start( + fn iter_all_by_start( &self, - column: Column, start: Option, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, S: AsRef<[u8]>, { - self.iter_all_filtered::(column, None, start, direction) + self.iter_all_filtered::(None, start, direction) } - fn iter_all_filtered( + fn iter_all_filtered( &self, - column: Column, prefix: Option

, start: Option, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, P: AsRef<[u8]>, S: AsRef<[u8]>, { self.data + .as_ref() .iter_all( - column, + M::column(), prefix.as_ref().map(|p| p.as_ref()), start.as_ref().map(|s| s.as_ref()), direction.unwrap_or_default(), ) .map(|val| { val.and_then(|(key, value)| { - let key = K::from(key); - let value: V = - postcard::from_bytes(&value).map_err(|_| StorageError::Codec)?; + let key = + >::KeyCodec::decode( + key.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + let value = + >::ValueCodec::decode( + value.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; Ok((key, value)) }) }) diff --git a/crates/fuel-core/src/database/balances.rs b/crates/fuel-core/src/database/balances.rs index 0c92179adf..84eb0c7f7e 100644 --- a/crates/fuel-core/src/database/balances.rs +++ b/crates/fuel-core/src/database/balances.rs @@ -1,155 +1,18 @@ -use crate::database::{ - storage::{ - ContractsAssetsMerkleData, - ContractsAssetsMerkleMetadata, - DatabaseColumn, - SparseMerkleMetadata, - }, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ tables::ContractsAssets, ContractsAssetKey, Error as StorageError, - Mappable, - MerkleRoot, - MerkleRootStorage, - StorageAsMut, - StorageAsRef, - StorageInspect, - StorageMutate, + StorageBatchMutate, }; use fuel_core_types::{ fuel_asm::Word, - fuel_merkle::{ - sparse, - sparse::{ - in_memory, - MerkleTree, - MerkleTreeKey, - }, - }, fuel_types::{ AssetId, ContractId, }, }; use itertools::Itertools; -use std::borrow::{ - BorrowMut, - Cow, -}; - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> { - self.get(key.as_ref(), Column::ContractsAssets) - .map_err(Into::into) - } - - fn contains_key( - &self, - key: &::Key, 
- ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsAssets) - .map_err(Into::into) - } -} - -impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::insert(self, key.as_ref(), Column::ContractsAssets, value) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())? - .unwrap_or_default(); - - let root = prev_metadata.root; - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contact's key-value dataset. The key is the asset id and the - // value the Word - tree.update(MerkleTreeKey::new(key), value.to_be_bytes().as_slice()) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Generate new metadata for the updated tree - let root = tree.root(); - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - - prev - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::take(self, key.as_ref(), Column::ContractsAssets) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())?; - - if let Some(prev_metadata) = prev_metadata { - let root = prev_metadata.root; - - // Load the tree saved in metadata - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. 
The key is the asset id and - // the value is the Word - tree.delete(MerkleTreeKey::new(key)) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - let root = tree.root(); - if root == *sparse::empty_sum() { - // The tree is now empty; remove the metadata - self.storage::() - .remove(key.contract_id())?; - } else { - // Generate new metadata for the updated tree - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - } - } - - prev - } -} - -impl MerkleRootStorage for Database { - fn root(&self, parent: &ContractId) -> Result { - let metadata = self - .storage::() - .get(parent)?; - let root = metadata - .map(|metadata| metadata.root) - .unwrap_or_else(|| in_memory::MerkleTree::new().root()); - Ok(root) - } -} impl Database { /// Initialize the balances of the contract from the all leafs. @@ -162,56 +25,23 @@ impl Database { where S: Iterator, { - if self - .storage::() - .contains_key(contract_id)? - { - return Err( - anyhow::anyhow!("The contract balances is already initialized").into(), - ) - } - - let balances = balances.collect_vec(); - - // Keys and values should be original without any modifications. - // Key is `ContractId` ++ `AssetId` - self.batch_insert( - Column::ContractsAssets, - balances.clone().into_iter().map(|(asset, value)| { - (ContractsAssetKey::new(contract_id, &asset), value) - }), - )?; - - // Merkle data: - // - Asset key should be converted into `MerkleTreeKey` by `new` function that hashes them. - // - The balance value are original. 
- let balances = balances.into_iter().map(|(asset, value)| { - ( - MerkleTreeKey::new(ContractsAssetKey::new(contract_id, &asset)), - value.to_be_bytes(), - ) - }); - let (root, nodes) = in_memory::MerkleTree::nodes_from_set(balances); - self.batch_insert(ContractsAssetsMerkleData::column(), nodes.into_iter())?; - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(contract_id, &metadata)?; - - Ok(()) + let balances = balances + .map(|(asset, balance)| { + (ContractsAssetKey::new(contract_id, &asset), balance) + }) + .collect_vec(); + <_ as StorageBatchMutate>::init_storage( + &mut self.data, + &mut balances.iter().map(|(key, value)| (key, value)), + ) } } #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::{ - StorageAsMut, - StorageAsRef, - }; - use fuel_core_types::fuel_types::{ - AssetId, - Word, - }; + use fuel_core_storage::StorageAsMut; + use fuel_core_types::fuel_types::AssetId; use rand::Rng; fn random_asset_id(rng: &mut R) -> AssetId @@ -223,255 +53,6 @@ mod tests { bytes.into() } - #[test] - fn get() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&key) - .unwrap() - .unwrap() - .into_owned(), - balance - ); - } - - #[test] - fn put() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - let returned = database - .storage::() - .get(&key) - .unwrap() - .unwrap(); - assert_eq!(*returned, balance); - } - - #[test] - fn remove() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - 
database.storage::().remove(&key).unwrap(); - - assert!(!database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn exists() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn root() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let mut database = Database::default(); - - StorageMutate::::insert(&mut database, &key, &balance).unwrap(); - - let root = database - .storage::() - .root(key.contract_id()); - assert!(root.is_ok()) - } - - #[test] - fn root_returns_empty_root_for_invalid_contract() { - let invalid_contract_id = ContractId::from([1u8; 32]); - let database = Database::default(); - let empty_root = in_memory::MerkleTree::new().root(); - let root = database - .storage::() - .root(&invalid_contract_id) - .unwrap(); - assert_eq!(root, empty_root) - } - - #[test] - fn put_updates_the_assets_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract asset - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - } - - #[test] - fn 
put_creates_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let database = &mut Database::default(); - - // Write a contract asset - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_some()); - } - - #[test] - fn remove_updates_the_assets_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract asset - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - let root_0 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Remove the first contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - database.storage::().remove(&key).unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - assert_eq!(root_0, root_2); - } - - #[test] - fn updating_foreign_contract_does_not_affect_the_given_contract_insertion() { - let given_contract_id = ContractId::from([1u8; 32]); - let foreign_contract_id = ContractId::from([2u8; 32]); - let database = &mut Database::default(); - - let asset_id = AssetId::new([0u8; 32]); - let balance: Word = 100; - - // Given - let given_contract_key = 
(&given_contract_id, &asset_id).into(); - let foreign_contract_key = (&foreign_contract_id, &asset_id).into(); - database - .storage::() - .insert(&given_contract_key, &balance) - .unwrap(); - - // When - database - .storage::() - .insert(&foreign_contract_key, &balance) - .unwrap(); - database - .storage::() - .remove(&foreign_contract_key) - .unwrap(); - - // Then - let result = database - .storage::() - .insert(&given_contract_key, &balance) - .unwrap(); - - assert!(result.is_some()); - } - #[test] fn init_contract_balances_works() { use rand::{ @@ -526,37 +107,4 @@ mod tests { assert_eq!(seq_value, value); } } - - #[test] - fn remove_deletes_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let database = &mut Database::default(); - - // Write a contract asset - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the Merkle metadata - database - .storage::() - .get(&contract_id) - .unwrap() - .expect("Expected Merkle metadata to be present"); - - // Remove the contract asset - database.storage::().remove(&key).unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_none()); - } } diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index f4fbbe3342..f270e581f6 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -1,23 +1,28 @@ use crate::database::{ - storage::{ - DenseMerkleMetadata, - FuelBlockMerkleData, - FuelBlockMerkleMetadata, - FuelBlockSecondaryKeyBlockHeights, - ToDatabaseKey, - }, Column, Database, Error as DatabaseError, }; use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + primitive::Primitive, + raw::Raw, + }, iter::IterDirection, not_found, + structured_storage::TableWithBlueprint, tables::{ + 
merkle::{ + DenseMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, FuelBlocks, Transactions, }, Error as StorageError, + Mappable, MerkleRootStorage, Result as StorageResult, StorageAsMut, @@ -39,27 +44,48 @@ use fuel_core_types::{ tai64::Tai64, }; use itertools::Itertools; -use std::{ - borrow::{ - BorrowMut, - Cow, - }, - convert::{ - TryFrom, - TryInto, - }, +use std::borrow::{ + BorrowMut, + Cow, }; +/// The table of fuel block's secondary key - `BlockHeight`. +/// It links the `BlockHeight` to corresponding `BlockId`. +pub struct FuelBlockSecondaryKeyBlockHeights; + +impl Mappable for FuelBlockSecondaryKeyBlockHeights { + /// Secondary key - `BlockHeight`. + type Key = BlockHeight; + type OwnedKey = Self::Key; + /// Primary key - `BlockId`. + type Value = BlockId; + type OwnedValue = Self::Value; +} + +impl TableWithBlueprint for FuelBlockSecondaryKeyBlockHeights { + type Blueprint = Plain, Raw>; + + fn column() -> Column { + Column::FuelBlockSecondaryKeyBlockHeights + } +} + +#[cfg(test)] +fuel_core_storage::basic_storage_tests!( + FuelBlockSecondaryKeyBlockHeights, + ::Key::default(), + ::Value::default() +); + impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &BlockId) -> Result>, Self::Error> { - Database::get(self, key.as_slice(), Column::FuelBlocks).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &BlockId) -> Result { - Database::contains_key(self, key.as_slice(), Column::FuelBlocks) - .map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -69,7 +95,10 @@ impl StorageMutate for Database { key: &BlockId, value: &CompressedBlock, ) -> Result, Self::Error> { - let prev = Database::insert(self, key.as_slice(), Column::FuelBlocks, value)?; + let prev = self + .data + .storage_as_mut::() + .insert(key, value)?; let height = value.header().height(); self.storage::() @@ -77,10 +106,7 @@ impl StorageMutate for Database { // Get latest metadata entry let 
prev_metadata = self - .iter_all::, DenseMerkleMetadata>( - Column::FuelBlockMerkleMetadata, - Some(IterDirection::Reverse), - ) + .iter_all::(Some(IterDirection::Reverse)) .next() .transpose()? .map(|(_, metadata)| metadata) @@ -105,7 +131,7 @@ impl StorageMutate for Database { fn remove(&mut self, key: &BlockId) -> Result, Self::Error> { let prev: Option = - Database::take(self, key.as_slice(), Column::FuelBlocks)?; + self.data.storage_as_mut::().remove(key)?; if let Some(block) = &prev { let height = block.header().height(); @@ -148,12 +174,9 @@ impl Database { } pub fn get_block_id(&self, height: &BlockHeight) -> StorageResult> { - Database::get( - self, - height.database_key().as_ref(), - Column::FuelBlockSecondaryKeyBlockHeights, - ) - .map_err(Into::into) + self.storage::() + .get(height) + .map(|v| v.map(|v| v.into_owned())) } pub fn all_block_ids( @@ -162,48 +185,23 @@ impl Database { direction: IterDirection, ) -> impl Iterator> + '_ { let start = start.map(|b| b.to_bytes()); - self.iter_all_by_start::, BlockId, _>( - Column::FuelBlockSecondaryKeyBlockHeights, + self.iter_all_by_start::( start, Some(direction), ) - .map(|res| { - let (height, id) = res?; - let block_height_bytes: [u8; 4] = height - .as_slice() - .try_into() - .expect("block height always has correct number of bytes"); - Ok((block_height_bytes.into(), id)) - }) } pub fn ids_of_genesis_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.iter_all( - Column::FuelBlockSecondaryKeyBlockHeights, - Some(IterDirection::Forward), - ) - .next() - .ok_or(DatabaseError::ChainUninitialized)? - .map(|(height, id): (Vec, BlockId)| { - let bytes = <[u8; 4]>::try_from(height.as_slice()) - .expect("all block heights are stored with the correct amount of bytes"); - (u32::from_be_bytes(bytes).into(), id) - }) + self.iter_all::(Some(IterDirection::Forward)) + .next() + .ok_or(DatabaseError::ChainUninitialized)? 
} pub fn ids_of_latest_block(&self) -> StorageResult> { let ids = self - .iter_all::, BlockId>( - Column::FuelBlockSecondaryKeyBlockHeights, - Some(IterDirection::Reverse), - ) + .iter_all::(Some(IterDirection::Reverse)) .next() - .transpose()? - .map(|(height, block)| { - // safety: we know that all block heights are stored with the correct amount of bytes - let bytes = <[u8; 4]>::try_from(height.as_slice()).unwrap(); - (u32::from_be_bytes(bytes).into(), block) - }); + .transpose()?; Ok(ids) } diff --git a/crates/fuel-core/src/database/code_root.rs b/crates/fuel-core/src/database/code_root.rs deleted file mode 100644 index 7474a85d2a..0000000000 --- a/crates/fuel-core/src/database/code_root.rs +++ /dev/null @@ -1,122 +0,0 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, -}; -use fuel_core_storage::tables::ContractsInfo; - -impl DatabaseColumn for ContractsInfo { - fn column() -> Column { - Column::ContractsInfo - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::database::Database; - use fuel_core_storage::StorageAsMut; - use fuel_core_types::{ - fuel_types::{ - Bytes32, - ContractId, - Salt, - }, - fuel_vm::Contract, - }; - use rand::{ - rngs::StdRng, - Rng, - SeedableRng, - }; - - #[test] - fn get() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - (salt, root) - ); - } - - #[test] - fn put() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = 
&mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - let returned: (Salt, Bytes32) = *database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap(); - assert_eq!(returned, (salt, root)); - } - - #[test] - fn remove() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .contains_key(contract_id.as_ref(), Column::ContractsInfo) - .unwrap()); - } - - #[test] - fn exists() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } -} diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index b56ca30daf..d1979c86ff 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -1,12 +1,18 @@ use crate::database::{ - storage::DatabaseColumn, Column, Database, }; use fuel_core_chain_config::CoinConfig; use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + primitive::utxo_id_to_bytes, + raw::Raw, + }, iter::IterDirection, not_found, + structured_storage::TableWithBlueprint, tables::Coins, Error as StorageError, Mappable, @@ -21,7 +27,6 @@ use fuel_core_types::{ entities::coins::coin::CompressedCoin, fuel_tx::{ Address, - Bytes32, UtxoId, }, }; @@ -35,13 +40,6 
@@ pub fn owner_coin_id_key(owner: &Address, coin_id: &UtxoId) -> OwnedCoinKey { default } -fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { - let mut default = [0; TxId::LEN + 1]; - default[0..TxId::LEN].copy_from_slice(utxo_id.tx_id().as_ref()); - default[TxId::LEN] = utxo_id.output_index(); - default -} - /// The storage table of owned coin ids. Maps addresses to owned coins. pub struct OwnedCoins; /// The storage key for owned coins: `Address ++ UtxoId` @@ -51,25 +49,45 @@ impl Mappable for OwnedCoins { type Key = Self::OwnedKey; type OwnedKey = OwnedCoinKey; type Value = Self::OwnedValue; - type OwnedValue = bool; + type OwnedValue = (); } -impl DatabaseColumn for OwnedCoins { +impl TableWithBlueprint for OwnedCoins { + type Blueprint = Plain; + fn column() -> Column { Column::OwnedCoins } } +#[cfg(test)] +mod test { + use super::*; + + fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; 65]; + rng.fill(bytes.as_mut()); + bytes + } + + fuel_core_storage::basic_storage_tests!( + OwnedCoins, + [0u8; 65], + ::Value::default(), + ::Value::default(), + generate_key + ); +} + impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &UtxoId) -> Result>, Self::Error> { - Database::get(self, &utxo_id_to_bytes(key), Column::Coins).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &UtxoId) -> Result { - Database::contains_key(self, &utxo_id_to_bytes(key), Column::Coins) - .map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -81,16 +99,15 @@ impl StorageMutate for Database { ) -> Result, Self::Error> { let coin_by_owner = owner_coin_id_key(&value.owner, key); // insert primary record - let insert = Database::insert(self, utxo_id_to_bytes(key), Column::Coins, value)?; + let insert = self.data.storage_as_mut::().insert(key, value)?; // insert secondary index by owner self.storage_as_mut::() - .insert(&coin_by_owner, &true)?; + .insert(&coin_by_owner, &())?; 
Ok(insert) } fn remove(&mut self, key: &UtxoId) -> Result, Self::Error> { - let coin: Option = - Database::take(self, &utxo_id_to_bytes(key), Column::Coins)?; + let coin = self.data.storage_as_mut::().remove(key)?; // cleanup secondary index if let Some(coin) = &coin { @@ -109,8 +126,7 @@ impl Database { start_coin: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, bool, _, _>( - Column::OwnedCoins, + self.iter_all_filtered::( Some(*owner), start_coin.map(|b| owner_coin_id_key(owner, &b)), direction, @@ -138,22 +154,19 @@ impl Database { pub fn get_coin_config(&self) -> StorageResult>> { let configs = self - .iter_all::, CompressedCoin>(Column::Coins, None) + .iter_all::(None) .map(|raw_coin| -> StorageResult { - let coin = raw_coin?; - - let byte_id = Bytes32::new(coin.0[..32].try_into()?); - let output_index = coin.0[32]; + let (utxo_id, coin) = raw_coin?; Ok(CoinConfig { - tx_id: Some(byte_id), - output_index: Some(output_index), - tx_pointer_block_height: Some(coin.1.tx_pointer.block_height()), - tx_pointer_tx_idx: Some(coin.1.tx_pointer.tx_index()), - maturity: Some(coin.1.maturity), - owner: coin.1.owner, - amount: coin.1.amount, - asset_id: coin.1.asset_id, + tx_id: Some(*utxo_id.tx_id()), + output_index: Some(utxo_id.output_index()), + tx_pointer_block_height: Some(coin.tx_pointer.block_height()), + tx_pointer_tx_idx: Some(coin.tx_pointer.tx_index()), + maturity: Some(coin.maturity), + owner: coin.owner, + amount: coin.amount, + asset_id: coin.asset_id, }) }) .collect::>>()?; diff --git a/crates/fuel-core/src/database/contracts.rs b/crates/fuel-core/src/database/contracts.rs index 48cbb1a780..ead374f465 100644 --- a/crates/fuel-core/src/database/contracts.rs +++ b/crates/fuel-core/src/database/contracts.rs @@ -1,29 +1,20 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, - Database, -}; +use crate::database::Database; use fuel_core_chain_config::ContractConfig; use fuel_core_storage::{ iter::IterDirection, 
tables::{ + ContractsAssets, ContractsInfo, ContractsLatestUtxo, ContractsRawCode, + ContractsState, }, ContractsAssetKey, - Error as StorageError, - Mappable, Result as StorageResult, StorageAsRef, - StorageInspect, - StorageMutate, - StorageRead, - StorageSize, }; use fuel_core_types::{ entities::contract::ContractUtxoInfo, - fuel_tx::Contract, fuel_types::{ AssetId, Bytes32, @@ -31,80 +22,6 @@ use fuel_core_types::{ Word, }, }; -use std::borrow::Cow; - -impl DatabaseColumn for ContractsLatestUtxo { - fn column() -> Column { - Column::ContractsLatestUtxo - } -} - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> - { - Ok(self - .read_alloc(key.as_ref(), Column::ContractsRawCode)? - .map(|v| Cow::Owned(Contract::from(v)))) - } - - fn contains_key( - &self, - key: &::Key, - ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsRawCode) - .map_err(Into::into) - } -} - -// # Dev-note: The value of the `ContractsRawCode` has a unique implementation of serialization -// and deserialization. Because the value is a contract byte code represented by bytes, -// we don't use `serde::Deserialization` and `serde::Serialization` for `Vec`, because we don't -// need to store the size of the contract. We store/load raw bytes. 
-impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let result = Database::insert_raw(self, key, Column::ContractsRawCode, value)?; - - Ok(result.map(|v| Contract::from(v.as_ref().clone()))) - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let result = Database::take_raw(self, key.as_ref(), Column::ContractsRawCode)?; - - Ok(result.map(|v| Contract::from(v.as_ref().clone()))) - } -} - -impl StorageSize for Database { - fn size_of_value(&self, key: &ContractId) -> Result, Self::Error> { - self.size_of_value(key.as_ref(), Column::ContractsRawCode) - } -} - -impl StorageRead for Database { - fn read( - &self, - key: &ContractId, - buf: &mut [u8], - ) -> Result, Self::Error> { - self.read(key.as_ref(), Column::ContractsRawCode, buf) - } - - fn read_alloc(&self, key: &ContractId) -> Result>, Self::Error> { - self.read_alloc(key.as_ref(), Column::ContractsRawCode) - } -} impl Database { pub fn get_contract_config_by_id( @@ -136,37 +53,25 @@ impl Database { .into_owned(); let state = Some( - self.iter_all_by_prefix::, Bytes32, _>( - Column::ContractsState, - Some(contract_id.as_ref()), - ) - .map(|res| -> StorageResult<(Bytes32, Bytes32)> { - let safe_res = res?; - - // We don't need to store ContractId which is the first 32 bytes of this - // key, as this Vec is already attached to that ContractId - let state_key = Bytes32::new(safe_res.0[32..].try_into()?); - - Ok((state_key, safe_res.1)) - }) - .filter(|val| val.is_ok()) - .collect::>>()?, + self.iter_all_by_prefix::(Some(contract_id.as_ref())) + .map(|res| -> StorageResult<(Bytes32, Bytes32)> { + let (key, value) = res?; + + Ok((*key.state_key(), value)) + }) + .filter(|val| val.is_ok()) + .collect::>>()?, ); let balances = Some( - self.iter_all_by_prefix::, u64, _>( - Column::ContractsAssets, - Some(contract_id.as_ref()), - ) - .map(|res| { - let safe_res = res?; - - let asset_id = 
AssetId::new(safe_res.0[32..].try_into()?); - - Ok((asset_id, safe_res.1)) - }) - .filter(|val| val.is_ok()) - .collect::>>()?, + self.iter_all_by_prefix::(Some(contract_id.as_ref())) + .map(|res| { + let (key, value) = res?; + + Ok((*key.asset_id(), value)) + }) + .filter(|val| val.is_ok()) + .collect::>>()?, ); Ok(ContractConfig { @@ -188,25 +93,19 @@ impl Database { start_asset: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, Word, _, _>( - Column::ContractsAssets, + self.iter_all_filtered::( Some(contract), start_asset.map(|asset_id| ContractsAssetKey::new(&contract, &asset_id)), direction, ) - .map(|res| { - res.map(|(key, balance)| { - (AssetId::new(key[32..].try_into().unwrap()), balance) - }) - }) + .map(|res| res.map(|(key, balance)| (*key.asset_id(), balance))) } pub fn get_contract_config(&self) -> StorageResult>> { let configs = self - .iter_all::, Word>(Column::ContractsRawCode, None) + .iter_all::(None) .map(|raw_contract_id| -> StorageResult { - let contract_id = - ContractId::new(raw_contract_id.unwrap().0[..32].try_into()?); + let contract_id = raw_contract_id?.0; self.get_contract_config_by_id(contract_id) }) .collect::>>()?; @@ -219,60 +118,12 @@ impl Database { mod tests { use super::*; use fuel_core_storage::StorageAsMut; - use fuel_core_types::fuel_tx::{ - Contract, - TxId, - TxPointer, - UtxoId, - }; + use fuel_core_types::fuel_tx::Contract; use rand::{ RngCore, SeedableRng, }; - #[test] - fn raw_code_get() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - contract - ); - } - - #[test] - fn raw_code_put() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = 
Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - let returned: Contract = database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(returned, contract); - } - #[test] fn raw_code_put_huge_contract() { let rng = &mut rand::rngs::StdRng::seed_from_u64(2322u64); @@ -295,148 +146,4 @@ mod tests { .into_owned(); assert_eq!(returned, contract); } - - #[test] - fn raw_code_remove() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn raw_code_exists() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn latest_utxo_get() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - let utxo_info = ContractUtxoInfo { - utxo_id, - tx_pointer, - }; - let database = &mut Database::default(); - - database - .storage::() - .insert(&contract_id, &utxo_info) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - utxo_info - ); - } - - #[test] - fn latest_utxo_put() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = 
TxPointer::new(1.into(), 5); - let utxo_info = ContractUtxoInfo { - utxo_id, - tx_pointer, - }; - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &utxo_info) - .unwrap(); - - let returned: ContractUtxoInfo = database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(returned, utxo_info); - } - - #[test] - fn latest_utxo_remove() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - - let database = &mut Database::default(); - database - .storage::() - .insert( - &contract_id, - &ContractUtxoInfo { - utxo_id, - tx_pointer, - }, - ) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn latest_utxo_exists() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - - let database = &mut Database::default(); - database - .storage::() - .insert( - &contract_id, - &ContractUtxoInfo { - utxo_id, - tx_pointer, - }, - ) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } } diff --git a/crates/fuel-core/src/database/message.rs b/crates/fuel-core/src/database/message.rs index cccbf8abb1..96ed198447 100644 --- a/crates/fuel-core/src/database/message.rs +++ b/crates/fuel-core/src/database/message.rs @@ -1,17 +1,27 @@ use crate::database::{ - storage::ToDatabaseKey, Column, Database, }; use fuel_core_chain_config::MessageConfig; use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + manual::Manual, + postcard::Postcard, + Decode, + Encode, + }, iter::IterDirection, + structured_storage::TableWithBlueprint, tables::{ Messages, SpentMessages, }, Error as StorageError, + Mappable, Result 
as StorageResult, + StorageAsMut, + StorageAsRef, StorageInspect, StorageMutate, }; @@ -27,19 +37,50 @@ use std::{ ops::Deref, }; -use super::storage::DatabaseColumn; +fuel_core_types::fuel_vm::double_key!(OwnedMessageKey, Address, address, Nonce, nonce); + +/// The table that stores all messages per owner. +pub struct OwnedMessageIds; + +impl Mappable for OwnedMessageIds { + type Key = OwnedMessageKey; + type OwnedKey = Self::Key; + type Value = (); + type OwnedValue = Self::Value; +} + +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &OwnedMessageKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + OwnedMessageKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + +impl TableWithBlueprint for OwnedMessageIds { + type Blueprint = Plain, Postcard>; + + fn column() -> fuel_core_storage::column::Column { + Column::OwnedMessageIds + } +} impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &Nonce) -> Result>, Self::Error> { - let key = key.database_key(); - Database::get(self, key.as_ref(), Column::Messages).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &Nonce) -> Result { - let key = key.database_key(); - Database::contains_key(self, key.as_ref(), Column::Messages).map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -50,42 +91,28 @@ impl StorageMutate for Database { value: &Message, ) -> Result, Self::Error> { // insert primary record - let result = - Database::insert(self, key.database_key().as_ref(), Column::Messages, value)?; + let result = self.data.storage_as_mut::().insert(key, value)?; // insert secondary record by owner - let _: Option = Database::insert( - self, - owner_msg_id_key(&value.recipient, key), - Column::OwnedMessageIds, - &true, - )?; + self.storage_as_mut::() + .insert(&OwnedMessageKey::new(&value.recipient, key), 
&())?; Ok(result) } fn remove(&mut self, key: &Nonce) -> Result, Self::Error> { let result: Option = - Database::take(self, key.database_key().as_ref(), Column::Messages)?; + self.data.storage_as_mut::().remove(key)?; if let Some(message) = &result { - Database::take::( - self, - &owner_msg_id_key(&message.recipient, key), - Column::OwnedMessageIds, - )?; + self.storage_as_mut::() + .remove(&OwnedMessageKey::new(&message.recipient, key))?; } Ok(result) } } -impl DatabaseColumn for SpentMessages { - fn column() -> Column { - Column::SpentMessages - } -} - impl Database { pub fn owned_message_ids( &self, @@ -93,18 +120,12 @@ impl Database { start_message_id: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, bool, _, _>( - Column::OwnedMessageIds, + self.iter_all_filtered::( Some(*owner), - start_message_id.map(|msg_id| owner_msg_id_key(owner, &msg_id)), + start_message_id.map(|msg_id| OwnedMessageKey::new(owner, &msg_id)), direction, ) - .map(|res| { - res.map(|(key, _)| { - Nonce::try_from(&key[Address::LEN..Address::LEN + Nonce::LEN]) - .expect("key is always {Nonce::LEN} bytes") - }) - }) + .map(|res| res.map(|(key, _)| *key.nonce())) } pub fn all_messages( @@ -113,7 +134,7 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ { let start = start.map(|v| v.deref().to_vec()); - self.iter_all_by_start::, Message, _>(Column::Messages, start, direction) + self.iter_all_by_start::(start, direction) .map(|res| res.map(|(_, message)| message)) } @@ -158,19 +179,9 @@ impl Database { } } -// TODO: Reuse `fuel_vm::storage::double_key` macro. 
-/// Get a Key by chaining Owner + Nonce -fn owner_msg_id_key(owner: &Address, nonce: &Nonce) -> [u8; Address::LEN + Nonce::LEN] { - let mut default = [0u8; Address::LEN + Nonce::LEN]; - default[0..Address::LEN].copy_from_slice(owner.as_ref()); - default[Address::LEN..].copy_from_slice(nonce.as_ref()); - default -} - #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::StorageAsMut; #[test] fn owned_message_ids() { @@ -180,14 +191,14 @@ mod tests { // insert a message with the first id let first_id = 1.into(); let _ = db - .storage::() + .storage_as_mut::() .insert(&first_id, &message) .unwrap(); // insert a message with the second id with the same Owner let second_id = 2.into(); let _ = db - .storage::() + .storage_as_mut::() .insert(&second_id, &message) .unwrap(); @@ -196,7 +207,7 @@ mod tests { assert_eq!(owned_msg_ids.count(), 2); // remove the first message with its given id - let _ = db.storage::().remove(&first_id).unwrap(); + let _ = db.storage_as_mut::().remove(&first_id).unwrap(); // verify that only second ID is left let owned_msg_ids: Vec<_> = db @@ -206,7 +217,7 @@ mod tests { assert_eq!(owned_msg_ids.len(), 1); // remove the second message with its given id - let _ = db.storage::().remove(&second_id).unwrap(); + let _ = db.storage_as_mut::().remove(&second_id).unwrap(); let owned_msg_ids = db.owned_message_ids(&message.recipient, None, None); assert_eq!(owned_msg_ids.count(), 0); } diff --git a/crates/fuel-core/src/database/metadata.rs b/crates/fuel-core/src/database/metadata.rs index 5239e58401..665b72e42f 100644 --- a/crates/fuel-core/src/database/metadata.rs +++ b/crates/fuel-core/src/database/metadata.rs @@ -1,27 +1,73 @@ -use crate::database::{ - Column, - Database, - Error as DatabaseError, +use crate::{ + database::{ + storage::UseStructuredImplementation, + Column, + Database, + Error as DatabaseError, + }, + state::DataSource, }; use fuel_core_chain_config::ChainConfig; -use fuel_core_storage::Result as StorageResult; +use 
fuel_core_storage::{ + blueprint::plain::Plain, + codec::postcard::Postcard, + structured_storage::{ + StructuredStorage, + TableWithBlueprint, + }, + Mappable, + Result as StorageResult, + StorageMutate, +}; + +/// The table that stores all metadata. Each key is a string, while the value depends on the context. +/// The tables mostly used to store metadata for correct work of the `fuel-core`. +pub struct MetadataTable(core::marker::PhantomData); + +impl Mappable for MetadataTable +where + V: Clone, +{ + type Key = str; + type OwnedKey = String; + type Value = V; + type OwnedValue = V; +} + +impl TableWithBlueprint for MetadataTable +where + V: Clone, +{ + type Blueprint = Plain; + + fn column() -> Column { + Column::Metadata + } +} + +impl UseStructuredImplementation> for StructuredStorage where + V: Clone +{ +} -pub(crate) const DB_VERSION_KEY: &[u8] = b"version"; -pub(crate) const CHAIN_NAME_KEY: &[u8] = b"chain_name"; +pub(crate) const DB_VERSION_KEY: &str = "version"; +pub(crate) const CHAIN_NAME_KEY: &str = "chain_name"; /// Tracks the total number of transactions written to the chain /// It's useful for analyzing TPS or other metrics. -pub(crate) const TX_COUNT: &[u8] = b"total_tx_count"; +pub(crate) const TX_COUNT: &str = "total_tx_count"; /// Can be used to perform migrations in the future. 
pub(crate) const DB_VERSION: u32 = 0x00; impl Database { /// Ensures the database is initialized and that the database version is correct - pub fn init(&self, config: &ChainConfig) -> StorageResult<()> { + pub fn init(&mut self, config: &ChainConfig) -> StorageResult<()> { + use fuel_core_storage::StorageAsMut; // initialize chain name if not set if self.get_chain_name()?.is_none() { - self.insert(CHAIN_NAME_KEY, Column::Metadata, &config.chain_name) - .and_then(|v: Option| { + self.storage::>() + .insert(CHAIN_NAME_KEY, &config.chain_name) + .and_then(|v| { if v.is_some() { Err(DatabaseError::ChainAlreadyInitialized.into()) } else { @@ -31,7 +77,8 @@ impl Database { } // Ensure the database version is correct - if let Some(version) = self.get::(DB_VERSION_KEY, Column::Metadata)? { + if let Some(version) = self.storage::>().get(DB_VERSION_KEY)? { + let version = version.into_owned(); if version != DB_VERSION { return Err(DatabaseError::InvalidDatabaseVersion { found: version, @@ -39,28 +86,42 @@ impl Database { })? } } else { - let _: Option = - self.insert(DB_VERSION_KEY, Column::Metadata, &DB_VERSION)?; + self.storage::>() + .insert(DB_VERSION_KEY, &DB_VERSION)?; } Ok(()) } pub fn get_chain_name(&self) -> StorageResult> { - self.get(CHAIN_NAME_KEY, Column::Metadata) + use fuel_core_storage::StorageAsRef; + self.storage::>() + .get(CHAIN_NAME_KEY) + .map(|v| v.map(|v| v.into_owned())) } pub fn increase_tx_count(&self, new_txs: u64) -> StorageResult { + use fuel_core_storage::StorageAsRef; // TODO: how should tx count be initialized after regenesis? - let current_tx_count: u64 = - self.get(TX_COUNT, Column::Metadata)?.unwrap_or_default(); + let current_tx_count: u64 = self + .storage::>() + .get(TX_COUNT)? + .unwrap_or_default() + .into_owned(); // Using saturating_add because this value doesn't significantly impact the correctness of execution. 
let new_tx_count = current_tx_count.saturating_add(new_txs); - self.insert::<_, _, u64>(TX_COUNT, Column::Metadata, &new_tx_count)?; + <_ as StorageMutate>>::insert( + // TODO: Workaround to avoid a mutable borrow of self + &mut StructuredStorage::new(self.data.as_ref()), + TX_COUNT, + &new_tx_count, + )?; Ok(new_tx_count) } pub fn get_tx_count(&self) -> StorageResult { - self.get(TX_COUNT, Column::Metadata) - .map(|v| v.unwrap_or_default()) + use fuel_core_storage::StorageAsRef; + self.storage::>() + .get(TX_COUNT) + .map(|v| v.unwrap_or_default().into_owned()) } } diff --git a/crates/fuel-core/src/database/receipts.rs b/crates/fuel-core/src/database/receipts.rs deleted file mode 100644 index 41cdf0df95..0000000000 --- a/crates/fuel-core/src/database/receipts.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, -}; -use fuel_core_storage::tables::Receipts; - -impl DatabaseColumn for Receipts { - fn column() -> Column { - Column::Receipts - } -} diff --git a/crates/fuel-core/src/database/relayer.rs b/crates/fuel-core/src/database/relayer.rs deleted file mode 100644 index 787182c01e..0000000000 --- a/crates/fuel-core/src/database/relayer.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::database::Column; -use fuel_core_relayer::ports::RelayerMetadata; - -use super::storage::DatabaseColumn; - -impl DatabaseColumn for RelayerMetadata { - fn column() -> Column { - Column::RelayerMetadata - } -} diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index 7b9f337fa2..a1cd34fa66 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -1,8 +1,4 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ not_found, tables::{ @@ -28,12 +24,6 @@ use fuel_core_types::{ }; use std::ops::Range; -impl DatabaseColumn for SealedBlockConsensus { - fn column() 
-> Column { - Column::FuelBlockConsensus - } -} - impl Database { pub fn get_sealed_block_by_id( &self, diff --git a/crates/fuel-core/src/database/state.rs b/crates/fuel-core/src/database/state.rs index d5af5db45d..53bed4b8e8 100644 --- a/crates/fuel-core/src/database/state.rs +++ b/crates/fuel-core/src/database/state.rs @@ -1,152 +1,15 @@ -use crate::database::{ - storage::{ - ContractsStateMerkleData, - ContractsStateMerkleMetadata, - DatabaseColumn, - SparseMerkleMetadata, - }, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ tables::ContractsState, ContractsStateKey, Error as StorageError, - Mappable, - MerkleRoot, - MerkleRootStorage, - StorageAsMut, - StorageAsRef, - StorageInspect, - StorageMutate, + StorageBatchMutate, }; -use fuel_core_types::{ - fuel_merkle::{ - sparse, - sparse::{ - in_memory, - MerkleTree, - MerkleTreeKey, - }, - }, - fuel_types::{ - Bytes32, - ContractId, - }, +use fuel_core_types::fuel_types::{ + Bytes32, + ContractId, }; use itertools::Itertools; -use std::borrow::{ - BorrowMut, - Cow, -}; - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> { - self.get(key.as_ref(), Column::ContractsState) - .map_err(Into::into) - } - - fn contains_key( - &self, - key: &::Key, - ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsState) - .map_err(Into::into) - } -} - -impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::insert(self, key.as_ref(), Column::ContractsState, value) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())? 
- .unwrap_or_default(); - - let root = prev_metadata.root; - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. The key is the state key and - // the value is the 32 bytes - tree.update(MerkleTreeKey::new(key), value.as_slice()) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Generate new metadata for the updated tree - let root = tree.root(); - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - - prev - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::take(self, key.as_ref(), Column::ContractsState) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())?; - - if let Some(prev_metadata) = prev_metadata { - let root = prev_metadata.root; - - // Load the tree saved in metadata - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. 
The key is the state key and - // the value is the 32 bytes - tree.delete(MerkleTreeKey::new(key)) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - let root = tree.root(); - if root == *sparse::empty_sum() { - // The tree is now empty; remove the metadata - self.storage::() - .remove(key.contract_id())?; - } else { - // Generate new metadata for the updated tree - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - } - } - - prev - } -} - -impl MerkleRootStorage for Database { - fn root(&self, parent: &ContractId) -> Result { - let metadata = self.storage::().get(parent)?; - let root = metadata - .map(|metadata| metadata.root) - .unwrap_or_else(|| in_memory::MerkleTree::new().root()); - Ok(root) - } -} impl Database { /// Initialize the state of the contract from all leaves. @@ -159,55 +22,20 @@ impl Database { where S: Iterator, { - let slots = slots.collect_vec(); - - if slots.is_empty() { - return Ok(()) - } - - if self - .storage::() - .contains_key(contract_id)? - { - return Err(anyhow::anyhow!("The contract state is already initialized").into()) - } - - // Keys and values should be original without any modifications. - // Key is `ContractId` ++ `StorageKey` - self.batch_insert( - Column::ContractsState, - slots - .clone() - .into_iter() - .map(|(key, value)| (ContractsStateKey::new(contract_id, &key), value)), - )?; - - // Merkle data: - // - State key should be converted into `MerkleTreeKey` by `new` function that hashes them. - // - The state value are original. 
- let slots = slots.into_iter().map(|(key, value)| { - ( - MerkleTreeKey::new(ContractsStateKey::new(contract_id, &key)), - value, - ) - }); - let (root, nodes) = in_memory::MerkleTree::nodes_from_set(slots); - self.batch_insert(ContractsStateMerkleData::column(), nodes.into_iter())?; - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(contract_id, &metadata)?; - - Ok(()) + let slots = slots + .map(|(key, value)| (ContractsStateKey::new(contract_id, &key), value)) + .collect_vec(); + <_ as StorageBatchMutate>::init_storage( + &mut self.data, + &mut slots.iter().map(|(key, value)| (key, value)), + ) } } #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::{ - StorageAsMut, - StorageAsRef, - }; + use fuel_core_storage::StorageAsMut; use fuel_core_types::fuel_types::Bytes32; use rand::Rng; @@ -220,253 +48,6 @@ mod tests { bytes.into() } - #[test] - fn get() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - assert_eq!( - *database - .storage::() - .get(&key) - .unwrap() - .unwrap(), - stored_value - ); - } - - #[test] - fn put() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - let returned: Bytes32 = *database - .storage::() - .get(&key) - .unwrap() - .unwrap(); - assert_eq!(returned, stored_value); - } - - #[test] - fn remove() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - database.storage::().remove(&key).unwrap(); - - 
assert!(!database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn exists() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn root() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let mut database = Database::default(); - - StorageMutate::::insert(&mut database, &key, &stored_value) - .unwrap(); - - let root = database.storage::().root(key.contract_id()); - assert!(root.is_ok()) - } - - #[test] - fn root_returns_empty_root_for_invalid_contract() { - let invalid_contract_id = ContractId::from([1u8; 32]); - let database = Database::default(); - let empty_root = in_memory::MerkleTree::new().root(); - let root = database - .storage::() - .root(&invalid_contract_id) - .unwrap(); - assert_eq!(root, empty_root) - } - - #[test] - fn put_updates_the_state_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract state - let state_key = Bytes32::from([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract state - let state_key = Bytes32::from([2u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - 
assert_ne!(root_1, root_2); - } - - #[test] - fn put_creates_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - let database = &mut Database::default(); - - // Write a contract state - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_some()); - } - - #[test] - fn remove_updates_the_state_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract state - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - let root_0 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract state - let state_key = Bytes32::new([2u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Remove the first contract state - let state_key = Bytes32::new([2u8; 32]); - let key = (&contract_id, &state_key).into(); - database.storage::().remove(&key).unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - assert_eq!(root_0, root_2); - } - - #[test] - fn updating_foreign_contract_does_not_affect_the_given_contract_insertion() { - let given_contract_id = ContractId::from([1u8; 32]); - let foreign_contract_id = ContractId::from([2u8; 32]); - let database = &mut Database::default(); - - let state_key = 
Bytes32::new([1u8; 32]); - let state_value = Bytes32::from([0xff; 32]); - - // Given - let given_contract_key = (&given_contract_id, &state_key).into(); - let foreign_contract_key = (&foreign_contract_id, &state_key).into(); - database - .storage::() - .insert(&given_contract_key, &state_value) - .unwrap(); - - // When - database - .storage::() - .insert(&foreign_contract_key, &state_value) - .unwrap(); - database - .storage::() - .remove(&foreign_contract_key) - .unwrap(); - - // Then - let result = database - .storage::() - .insert(&given_contract_key, &state_value) - .unwrap(); - - assert!(result.is_some()); - } - #[test] fn init_contract_state_works() { use rand::{ @@ -520,37 +101,4 @@ mod tests { assert_eq!(seq_value, value); } } - - #[test] - fn remove_deletes_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - let database = &mut Database::default(); - - // Write a contract state - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the Merkle metadata - database - .storage::() - .get(&contract_id) - .unwrap() - .expect("Expected Merkle metadata to be present"); - - // Remove the contract asset - database.storage::().remove(&key).unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_none()); - } } diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 2c2c5333c6..e63a64323d 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -1,319 +1,167 @@ -use crate::database::{ - Column, - Database, +use crate::{ + database::{ + block::FuelBlockSecondaryKeyBlockHeights, + coin::OwnedCoins, + message::OwnedMessageIds, + transactions::{ + OwnedTransactions, + TransactionStatuses, + }, + Database, + }, + 
state::DataSource, }; use fuel_core_storage::{ - tables::ProcessedTransactions, + structured_storage::StructuredStorage, + tables::{ + merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, + ContractsAssets, + ContractsInfo, + ContractsLatestUtxo, + ContractsRawCode, + ContractsState, + ProcessedTransactions, + Receipts, + SealedBlockConsensus, + SpentMessages, + Transactions, + }, Error as StorageError, Mappable, MerkleRoot, + MerkleRootStorage, Result as StorageResult, + StorageAsMut, + StorageAsRef, StorageInspect, StorageMutate, + StorageRead, + StorageSize, }; -use fuel_core_types::{ - blockchain::primitives::BlockId, - fuel_merkle::{ - binary, - sparse, - }, - fuel_tx::TxId, - fuel_types::{ - BlockHeight, - ContractId, - Nonce, - }, -}; -use serde::{ - de::DeserializeOwned, - Serialize, -}; -use std::{ - borrow::Cow, - ops::Deref, -}; - -/// Metadata for dense Merkle trees -#[derive(Clone, serde::Serialize, serde::Deserialize)] -pub struct DenseMerkleMetadata { - /// The root hash of the dense Merkle tree structure - pub root: MerkleRoot, - /// The version of the dense Merkle tree structure is equal to the number of - /// leaves. Every time we append a new leaf to the Merkle tree data set, we - /// increment the version number. 
- pub version: u64, -} - -impl Default for DenseMerkleMetadata { - fn default() -> Self { - let empty_merkle_tree = binary::root_calculator::MerkleRootCalculator::new(); - Self { - root: empty_merkle_tree.root(), - version: 0, - } - } -} - -/// Metadata for sparse Merkle trees -#[derive(Clone, serde::Serialize, serde::Deserialize)] -pub struct SparseMerkleMetadata { - /// The root hash of the sparse Merkle tree structure - pub root: MerkleRoot, -} - -impl Default for SparseMerkleMetadata { - fn default() -> Self { - let empty_merkle_tree = sparse::in_memory::MerkleTree::new(); - Self { - root: empty_merkle_tree.root(), - } - } -} - -/// The table of fuel block's secondary key - `BlockHeight`. -/// It links the `BlockHeight` to corresponding `BlockId`. -pub struct FuelBlockSecondaryKeyBlockHeights; - -impl Mappable for FuelBlockSecondaryKeyBlockHeights { - /// Secondary key - `BlockHeight`. - type Key = BlockHeight; - type OwnedKey = Self::Key; - /// Primary key - `BlockId`. - type Value = BlockId; - type OwnedValue = Self::Value; -} - -/// The table of BMT data for Fuel blocks. -pub struct FuelBlockMerkleData; - -impl Mappable for FuelBlockMerkleData { - type Key = u64; - type OwnedKey = Self::Key; - type Value = binary::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`FuelBlockMerkleData`](FuelBlockMerkleData) table. -pub struct FuelBlockMerkleMetadata; - -impl Mappable for FuelBlockMerkleMetadata { - type Key = BlockHeight; - type OwnedKey = Self::Key; - type Value = DenseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table of SMT data for Contract assets. 
-pub struct ContractsAssetsMerkleData; - -impl Mappable for ContractsAssetsMerkleData { - type Key = [u8; 32]; - type OwnedKey = Self::Key; - type Value = sparse::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`ContractsAssetsMerkleData`](ContractsAssetsMerkleData) table -pub struct ContractsAssetsMerkleMetadata; - -impl Mappable for ContractsAssetsMerkleMetadata { - type Key = ContractId; - type OwnedKey = Self::Key; - type Value = SparseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table of SMT data for Contract state. -pub struct ContractsStateMerkleData; - -impl Mappable for ContractsStateMerkleData { - type Key = [u8; 32]; - type OwnedKey = Self::Key; - type Value = sparse::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`ContractsStateMerkleData`](ContractsStateMerkleData) table -pub struct ContractsStateMerkleMetadata; - -impl Mappable for ContractsStateMerkleMetadata { - type Key = ContractId; - type OwnedKey = Self::Key; - type Value = SparseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table has a corresponding column in the database. -/// -/// Using this trait allows the configured mappable type to have its' -/// database integration auto-implemented for single column interactions. -/// -/// If the mappable type requires access to multiple columns or custom logic during setting/getting -/// then its' storage interfaces should be manually implemented and this trait should be avoided. -pub trait DatabaseColumn { - /// The column of the table. 
- fn column() -> Column; -} - -impl DatabaseColumn for FuelBlockSecondaryKeyBlockHeights { - fn column() -> Column { - Column::FuelBlockSecondaryKeyBlockHeights - } -} - -impl DatabaseColumn for ProcessedTransactions { - fn column() -> Column { - Column::ProcessedTransactions - } -} - -impl DatabaseColumn for FuelBlockMerkleData { - fn column() -> Column { - Column::FuelBlockMerkleData - } -} - -impl DatabaseColumn for FuelBlockMerkleMetadata { - fn column() -> Column { - Column::FuelBlockMerkleMetadata - } -} - -impl DatabaseColumn for ContractsAssetsMerkleData { - fn column() -> Column { - Column::ContractsAssetsMerkleData - } -} - -impl DatabaseColumn for ContractsAssetsMerkleMetadata { - fn column() -> Column { - Column::ContractsAssetsMerkleMetadata - } -} - -impl DatabaseColumn for ContractsStateMerkleData { - fn column() -> Column { - Column::ContractsStateMerkleData - } -} - -impl DatabaseColumn for ContractsStateMerkleMetadata { - fn column() -> Column { - Column::ContractsStateMerkleMetadata - } +use std::borrow::Cow; + +/// The trait allows selectively inheriting the implementation of storage traits from `StructuredStorage` +/// for the `Database`. Not all default implementations of the `StructuredStorage` are suitable +/// for the `Database`. Sometimes we want to override some of them and add a custom implementation +/// with additional logic. For example, we want to override the `StorageMutate` trait for the `Messages` +/// table to also track the owner of messages. +pub trait UseStructuredImplementation +where + M: Mappable, +{ } -impl StorageInspect for Database +/// The trait allows to implementation of `UseStructuredImplementation` for the `StructuredStorage` for multiple tables. +macro_rules! 
use_structured_implementation { + ($($m:ty),*) => { + $( + impl UseStructuredImplementation<$m> for StructuredStorage {} + )* + }; +} + +use_structured_implementation!( + ContractsRawCode, + ContractsAssets, + ContractsState, + ContractsLatestUtxo, + ContractsInfo, + SpentMessages, + SealedBlockConsensus, + Transactions, + ProcessedTransactions, + Receipts, + ContractsStateMerkleMetadata, + ContractsStateMerkleData, + ContractsAssetsMerkleMetadata, + ContractsAssetsMerkleData, + OwnedCoins, + OwnedMessageIds, + OwnedTransactions, + TransactionStatuses, + FuelBlockSecondaryKeyBlockHeights, + FuelBlockMerkleData, + FuelBlockMerkleMetadata +); +#[cfg(feature = "relayer")] +use_structured_implementation!(fuel_core_relayer::ports::RelayerMetadata); + +impl StorageInspect for Database where - T: Mappable + DatabaseColumn, - T::Key: ToDatabaseKey, - T::OwnedValue: DeserializeOwned, + M: Mappable, + StructuredStorage: + StorageInspect + UseStructuredImplementation, { type Error = StorageError; - fn get(&self, key: &T::Key) -> StorageResult>> { - self.get(key.database_key().as_ref(), T::column()) - .map_err(Into::into) + fn get(&self, key: &M::Key) -> StorageResult>> { + self.data.storage::().get(key) } - fn contains_key(&self, key: &T::Key) -> StorageResult { - self.contains_key(key.database_key().as_ref(), T::column()) - .map_err(Into::into) + fn contains_key(&self, key: &M::Key) -> StorageResult { + self.data.storage::().contains_key(key) } } -impl StorageMutate for Database +impl StorageMutate for Database where - T: Mappable + DatabaseColumn, - T::Key: ToDatabaseKey, - T::Value: Serialize, - T::OwnedValue: DeserializeOwned, + M: Mappable, + StructuredStorage: + StorageMutate + UseStructuredImplementation, { fn insert( &mut self, - key: &T::Key, - value: &T::Value, - ) -> StorageResult> { - Database::insert(self, key.database_key().as_ref(), T::column(), &value) - .map_err(Into::into) - } - - fn remove(&mut self, key: &T::Key) -> StorageResult> { - Database::take(self, 
key.database_key().as_ref(), T::column()).map_err(Into::into) + key: &M::Key, + value: &M::Value, + ) -> StorageResult> { + self.data.storage_as_mut::().insert(key, value) } -} - -/// Some keys requires pre-processing that could change their type. -pub trait ToDatabaseKey { - /// A new type of prepared database key that can be converted into bytes. - type Type<'a>: AsRef<[u8]> - where - Self: 'a; - - /// Coverts the key into database key that supports byte presentation. - fn database_key(&self) -> Self::Type<'_>; -} - -impl ToDatabaseKey for BlockHeight { - type Type<'a> = [u8; 4]; - fn database_key(&self) -> Self::Type<'_> { - self.to_bytes() + fn remove(&mut self, key: &M::Key) -> StorageResult> { + self.data.storage_as_mut::().remove(key) } } -impl ToDatabaseKey for u64 { - type Type<'a> = [u8; 8]; - - fn database_key(&self) -> Self::Type<'_> { - self.to_be_bytes() - } -} - -impl ToDatabaseKey for Nonce { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() - } -} - -impl ToDatabaseKey for ContractId { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() - } -} - -impl ToDatabaseKey for BlockId { - type Type<'a> = &'a [u8]; - - fn database_key(&self) -> Self::Type<'_> { - self.as_slice() +impl MerkleRootStorage for Database +where + M: Mappable, + StructuredStorage: + MerkleRootStorage + UseStructuredImplementation, +{ + fn root(&self, key: &Key) -> StorageResult { + self.data.storage::().root(key) } } -impl ToDatabaseKey for TxId { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() +impl StorageSize for Database +where + M: Mappable, + StructuredStorage: + StorageSize + UseStructuredImplementation, +{ + fn size_of_value(&self, key: &M::Key) -> StorageResult> { + <_ as StorageSize>::size_of_value(&self.data, key) } } -impl ToDatabaseKey for () { - type Type<'a> = &'a [u8]; - - fn database_key(&self) -> Self::Type<'_> { - &[] +impl 
StorageRead for Database +where + M: Mappable, + StructuredStorage: + StorageRead + UseStructuredImplementation, +{ + fn read(&self, key: &M::Key, buf: &mut [u8]) -> StorageResult> { + self.data.storage::().read(key, buf) } -} - -impl ToDatabaseKey for [u8; N] { - type Type<'a> = &'a [u8]; - fn database_key(&self) -> Self::Type<'_> { - self.as_slice() + fn read_alloc(&self, key: &M::Key) -> StorageResult>> { + self.data.storage::().read_alloc(key) } } diff --git a/crates/fuel-core/src/database/transaction.rs b/crates/fuel-core/src/database/transaction.rs index 2f8829ab40..ec3f3de67d 100644 --- a/crates/fuel-core/src/database/transaction.rs +++ b/crates/fuel-core/src/database/transaction.rs @@ -64,13 +64,10 @@ impl Transaction for DatabaseTransaction { impl From<&Database> for DatabaseTransaction { fn from(source: &Database) -> Self { - let data = Arc::new(MemoryTransactionView::new(source.data.clone())); + let data = Arc::new(MemoryTransactionView::new(source.data.as_ref().clone())); Self { changes: data.clone(), - database: Database { - data, - _drop: Default::default(), - }, + database: Database::new(data), } } } diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index e41e84b7ec..c7ec700f62 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -1,11 +1,24 @@ use crate::database::{ - storage::DatabaseColumn, Column, Database, }; +use core::{ + array::TryFromSliceError, + mem::size_of, +}; use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + manual::Manual, + postcard::Postcard, + raw::Raw, + Decode, + Encode, + }, iter::IterDirection, + structured_storage::TableWithBlueprint, tables::Transactions, + Mappable, Result as StorageResult, }; use fuel_core_types::{ @@ -21,15 +34,68 @@ use fuel_core_types::{ }, services::txpool::TransactionStatus, }; -use std::{ - mem::size_of, - ops::Deref, -}; -impl DatabaseColumn for Transactions { +/// 
These tables allow iteration over all transactions owned by an address. +pub struct OwnedTransactions; + +impl Mappable for OwnedTransactions { + type Key = OwnedTransactionIndexKey; + type OwnedKey = Self::Key; + type Value = Bytes32; + type OwnedValue = Self::Value; +} + +impl TableWithBlueprint for OwnedTransactions { + type Blueprint = Plain, Raw>; + + fn column() -> Column { + Column::TransactionsByOwnerBlockIdx + } +} + +/// The table stores the status of each transaction. +pub struct TransactionStatuses; + +impl Mappable for TransactionStatuses { + type Key = Bytes32; + type OwnedKey = Self::Key; + type Value = TransactionStatus; + type OwnedValue = Self::Value; +} + +impl TableWithBlueprint for TransactionStatuses { + type Blueprint = Plain; + fn column() -> Column { - Column::Transactions + Column::TransactionStatus + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; INDEX_SIZE]; + rng.fill(bytes.as_mut()); + bytes.into() } + + fuel_core_storage::basic_storage_tests!( + OwnedTransactions, + [1u8; INDEX_SIZE].into(), + ::Value::default(), + ::Value::default(), + generate_key + ); + + fuel_core_storage::basic_storage_tests!( + TransactionStatuses, + ::Key::default(), + TransactionStatus::Submitted { + time: fuel_core_types::tai64::Tai64::UNIX_EPOCH, + } + ); } impl Database { @@ -39,12 +105,8 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ { let start = start.map(|b| b.as_ref().to_vec()); - self.iter_all_by_start::, Transaction, _>( - Column::Transactions, - start, - direction, - ) - .map(|res| res.map(|(_, tx)| tx)) + self.iter_all_by_start::(start, direction) + .map(|res| res.map(|(_, tx)| tx)) } /// Iterates over a KV mapping of `[address + block height + tx idx] => transaction id`. 
This @@ -59,44 +121,45 @@ impl Database { ) -> impl Iterator> + '_ { let start = start .map(|cursor| owned_tx_index_key(&owner, cursor.block_height, cursor.tx_idx)); - self.iter_all_filtered::( - Column::TransactionsByOwnerBlockIdx, - Some(owner), - start, - direction, - ) - .map(|res| { - res.map(|(key, tx_id)| (TxPointer::new(key.block_height, key.tx_idx), tx_id)) - }) + self.iter_all_filtered::(Some(owner), start, direction) + .map(|res| { + res.map(|(key, tx_id)| { + (TxPointer::new(key.block_height, key.tx_idx), tx_id) + }) + }) } pub fn record_tx_id_owner( - &self, + &mut self, owner: &Address, block_height: BlockHeight, tx_idx: TransactionIndex, tx_id: &Bytes32, ) -> StorageResult> { - self.insert( - owned_tx_index_key(owner, block_height, tx_idx), - Column::TransactionsByOwnerBlockIdx, + use fuel_core_storage::StorageAsMut; + self.storage::().insert( + &OwnedTransactionIndexKey::new(owner, block_height, tx_idx), tx_id, ) } pub fn update_tx_status( - &self, + &mut self, id: &Bytes32, status: TransactionStatus, ) -> StorageResult> { - self.insert(id, Column::TransactionStatus, &status) + use fuel_core_storage::StorageAsMut; + self.storage::().insert(id, &status) } pub fn get_tx_status( &self, id: &Bytes32, ) -> StorageResult> { - self.get(&id.deref()[..], Column::TransactionStatus) + use fuel_core_storage::StorageAsRef; + self.storage::() + .get(id) + .map(|v| v.map(|v| v.into_owned())) } } @@ -123,30 +186,68 @@ fn owned_tx_index_key( pub type TransactionIndex = u16; +#[derive(Clone)] pub struct OwnedTransactionIndexKey { + owner: Address, block_height: BlockHeight, tx_idx: TransactionIndex, } -impl From for OwnedTransactionIndexKey -where - T: AsRef<[u8]>, -{ - fn from(bytes: T) -> Self { +impl OwnedTransactionIndexKey { + pub fn new( + owner: &Address, + block_height: BlockHeight, + tx_idx: TransactionIndex, + ) -> Self { + Self { + owner: *owner, + block_height, + tx_idx, + } + } +} + +impl From<[u8; INDEX_SIZE]> for OwnedTransactionIndexKey { + fn 
from(bytes: [u8; INDEX_SIZE]) -> Self { + let owner: [u8; 32] = bytes[..32].try_into().expect("It's an array of 32 bytes"); // the first 32 bytes are the owner, which is already known when querying let mut block_height_bytes: [u8; 4] = Default::default(); - block_height_bytes.copy_from_slice(&bytes.as_ref()[32..36]); + block_height_bytes.copy_from_slice(&bytes[32..36]); let mut tx_idx_bytes: [u8; 2] = Default::default(); tx_idx_bytes.copy_from_slice(&bytes.as_ref()[36..38]); Self { - // owner: Address::from(owner_bytes), + owner: Address::from(owner), block_height: u32::from_be_bytes(block_height_bytes).into(), tx_idx: u16::from_be_bytes(tx_idx_bytes), } } } +impl TryFrom<&[u8]> for OwnedTransactionIndexKey { + type Error = TryFromSliceError; + + fn try_from(bytes: &[u8]) -> Result { + let bytes: [u8; INDEX_SIZE] = bytes.try_into()?; + Ok(Self::from(bytes)) + } +} + +impl Encode for Manual { + type Encoder<'a> = [u8; INDEX_SIZE]; + + fn encode(t: &OwnedTransactionIndexKey) -> Self::Encoder<'_> { + owned_tx_index_key(&t.owner, t.block_height, t.tx_idx) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + OwnedTransactionIndexKey::try_from(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + #[derive(Clone, Debug, PartialOrd, Eq, PartialEq)] pub struct OwnedTransactionIndexCursor { pub block_height: BlockHeight, diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index 85fb031871..8b74df131b 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -19,7 +19,6 @@ mod tests { Coins, ContractsRawCode, Messages, - Receipts, }, StorageAsMut, }; @@ -662,23 +661,18 @@ mod tests { coinbase_recipient: config_coinbase, ..Default::default() }; - let mut producer = create_executor(Default::default(), config); + let producer = create_executor(Default::default(), config); let mut block = Block::default(); *block.transactions_mut() = vec![script.clone().into()]; - 
assert!(producer + let ExecutionResult { tx_status, .. } = producer .execute_and_commit( ExecutionBlock::Production(block.into()), - Default::default() + Default::default(), ) - .is_ok()); - let receipts = producer - .database - .storage::() - .get(&script.id(&producer.config.consensus_parameters.chain_id)) - .unwrap() - .unwrap(); + .expect("Should execute the block"); + let receipts = &tx_status[0].receipts; if let Some(Receipt::Return { val, .. }) = receipts.first() { *val == 1 @@ -2756,20 +2750,16 @@ mod tests { }, ); - executor + let ExecutionResult { tx_status, .. } = executor .execute_and_commit( ExecutionBlock::Production(block), ExecutionOptions { utxo_validation: true, }, ) - .unwrap(); + .expect("Should execute the block"); - let receipts = database - .storage::() - .get(&tx.id(&ChainId::default())) - .unwrap() - .unwrap(); + let receipts = &tx_status[0].receipts; assert_eq!(block_height as u64, receipts[0].val().unwrap()); } @@ -2835,21 +2825,16 @@ mod tests { }, ); - executor + let ExecutionResult { tx_status, .. 
} = executor .execute_and_commit( ExecutionBlock::Production(block), ExecutionOptions { utxo_validation: true, }, ) - .unwrap(); - - let receipts = database - .storage::() - .get(&tx.id(&ChainId::default())) - .unwrap() - .unwrap(); + .expect("Should execute the block"); + let receipts = &tx_status[0].receipts; assert_eq!(time.0, receipts[0].val().unwrap()); } } diff --git a/crates/fuel-core/src/graphql_api.rs b/crates/fuel-core/src/graphql_api.rs index 3fd27a3c19..12603d964a 100644 --- a/crates/fuel-core/src/graphql_api.rs +++ b/crates/fuel-core/src/graphql_api.rs @@ -9,9 +9,12 @@ use fuel_core_types::{ }; use std::net::SocketAddr; +pub mod api_service; +pub mod database; pub(crate) mod metrics_extension; pub mod ports; -pub mod service; +pub(crate) mod view_extension; +pub mod worker_service; #[derive(Clone, Debug)] pub struct Config { diff --git a/crates/fuel-core/src/graphql_api/service.rs b/crates/fuel-core/src/graphql_api/api_service.rs similarity index 89% rename from crates/fuel-core/src/graphql_api/service.rs rename to crates/fuel-core/src/graphql_api/api_service.rs index 6c6879ae30..15023a5995 100644 --- a/crates/fuel-core/src/graphql_api/service.rs +++ b/crates/fuel-core/src/graphql_api/api_service.rs @@ -1,13 +1,17 @@ use crate::{ - fuel_core_graphql_api::ports::{ - BlockProducerPort, - ConsensusModulePort, - DatabasePort, - P2pPort, - TxPoolPort, - }, - graphql_api::{ + fuel_core_graphql_api::{ + database::{ + OffChainView, + OnChainView, + }, metrics_extension::MetricsExtension, + ports::{ + BlockProducerPort, + ConsensusModulePort, + P2pPort, + TxPoolPort, + }, + view_extension::ViewExtension, Config, }, schema::{ @@ -55,6 +59,7 @@ use fuel_core_services::{ RunnableTask, StateWatcher, }; +use fuel_core_storage::transactional::AtomicView; use futures::Stream; use serde_json::json; use std::{ @@ -75,7 +80,7 @@ use tower_http::{ pub type Service = fuel_core_services::ServiceRunner; -pub type Database = Box; +pub use super::database::ReadDatabase; pub 
type BlockProducer = Box; // In the future GraphQL should not be aware of `TxPool`. It should @@ -160,28 +165,35 @@ impl RunnableTask for Task { // Need a seperate Data Object for each Query endpoint, cannot be avoided #[allow(clippy::too_many_arguments)] -pub fn new_service( +pub fn new_service( config: Config, schema: CoreSchemaBuilder, - database: Database, + on_database: OnChain, + off_database: OffChain, txpool: TxPool, producer: BlockProducer, consensus_module: ConsensusModule, p2p_service: P2pService, log_threshold_ms: Duration, request_timeout: Duration, -) -> anyhow::Result { +) -> anyhow::Result +where + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, +{ let network_addr = config.addr; + let combined_read_database = ReadDatabase::new(on_database, off_database); let schema = schema .data(config) - .data(database) + .data(combined_read_database) .data(txpool) .data(producer) .data(consensus_module) .data(p2p_service) .extension(async_graphql::extensions::Tracing) .extension(MetricsExtension::new(log_threshold_ms)) + .extension(ViewExtension::new()) .finish(); let router = Router::new() diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs new file mode 100644 index 0000000000..feb9a638c1 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -0,0 +1,234 @@ +use crate::fuel_core_graphql_api::ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessageProof, + DatabaseMessages, + OffChainDatabase, + OnChainDatabase, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IterDirection, + }, + tables::Receipts, + transactional::AtomicView, + Error as StorageError, + Mappable, + Result as StorageResult, + StorageInspect, +}; +use fuel_core_txpool::types::{ + ContractId, + TxId, +}; +use fuel_core_types::{ + blockchain::primitives::{ + BlockId, + DaBlockHeight, + }, + entities::message::{ + MerkleProof, + Message, + }, + fuel_tx::{ + Address, + AssetId, + 
TxPointer, + UtxoId, + }, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::{ + graphql_api::ContractBalance, + txpool::TransactionStatus, + }, +}; +use std::{ + borrow::Cow, + sync::Arc, +}; + +/// The on-chain view of the database used by the [`ReadView`] to fetch on-chain data. +pub type OnChainView = Arc; +/// The off-chain view of the database used by the [`ReadView`] to fetch off-chain data. +pub type OffChainView = Arc; + +/// The container of the on-chain and off-chain database view provides. +/// It is used only by `ViewExtension` to create a [`ReadView`]. +pub struct ReadDatabase { + /// The on-chain database view provider. + on_chain: Box>, + /// The off-chain database view provider. + off_chain: Box>, +} + +impl ReadDatabase { + /// Creates a new [`ReadDatabase`] with the given on-chain and off-chain database view providers. + pub fn new(on_chain: OnChain, off_chain: OffChain) -> Self + where + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, + { + Self { + on_chain: Box::new(on_chain), + off_chain: Box::new(off_chain), + } + } + + /// Creates a consistent view of the database. + pub fn view(&self) -> ReadView { + // TODO: Use the same height for both views to guarantee consistency. + // It is not possible to implement until `view_at` is implemented for the `AtomicView`. 
+ // https://github.com/FuelLabs/fuel-core/issues/1582 + ReadView { + on_chain: self.on_chain.latest_view(), + off_chain: self.off_chain.latest_view(), + } + } +} + +pub struct ReadView { + on_chain: OnChainView, + off_chain: OffChainView, +} + +impl DatabaseBlocks for ReadView { + fn block_id(&self, height: &BlockHeight) -> StorageResult { + self.on_chain.block_id(height) + } + + fn blocks_ids( + &self, + start: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { + self.on_chain.blocks_ids(start, direction) + } + + fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { + self.on_chain.ids_of_latest_block() + } +} + +impl StorageInspect for ReadView +where + M: Mappable, + dyn OnChainDatabase: StorageInspect, +{ + type Error = StorageError; + + fn get(&self, key: &M::Key) -> StorageResult>> { + self.on_chain.get(key) + } + + fn contains_key(&self, key: &M::Key) -> StorageResult { + self.on_chain.contains_key(key) + } +} + +impl DatabaseMessages for ReadView { + fn all_messages( + &self, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.on_chain.all_messages(start_message_id, direction) + } + + fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { + self.on_chain.message_is_spent(nonce) + } + + fn message_exists(&self, nonce: &Nonce) -> StorageResult { + self.on_chain.message_exists(nonce) + } +} + +impl DatabaseContracts for ReadView { + fn contract_balances( + &self, + contract: ContractId, + start_asset: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.on_chain + .contract_balances(contract, start_asset, direction) + } +} + +impl DatabaseChain for ReadView { + fn chain_name(&self) -> StorageResult { + self.on_chain.chain_name() + } + + fn da_height(&self) -> StorageResult { + self.on_chain.da_height() + } +} + +impl DatabaseMessageProof for ReadView { + fn block_history_proof( + &self, + message_block_height: 
&BlockHeight, + commit_block_height: &BlockHeight, + ) -> StorageResult { + self.on_chain + .block_history_proof(message_block_height, commit_block_height) + } +} + +impl OnChainDatabase for ReadView {} + +impl StorageInspect for ReadView { + type Error = StorageError; + + fn get( + &self, + key: &::Key, + ) -> StorageResult::OwnedValue>>> { + self.off_chain.get(key) + } + + fn contains_key(&self, key: &::Key) -> StorageResult { + self.off_chain.contains_key(key) + } +} + +impl OffChainDatabase for ReadView { + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.off_chain + .owned_message_ids(owner, start_message_id, direction) + } + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.off_chain.owned_coins_ids(owner, start_coin, direction) + } + + fn tx_status(&self, tx_id: &TxId) -> StorageResult { + self.off_chain.tx_status(tx_id) + } + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.off_chain + .owned_transactions_ids(owner, start, direction) + } +} diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index b897acb248..44ff62b79b 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -14,7 +14,6 @@ use fuel_core_storage::{ Messages, Receipts, SealedBlockConsensus, - SpentMessages, Transactions, }, Error as StorageError, @@ -57,14 +56,41 @@ use fuel_core_types::{ }; use std::sync::Arc; -/// The database port expected by GraphQL API service. 
-pub trait DatabasePort: +pub trait OffChainDatabase: + Send + Sync + StorageInspect +{ + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult>; + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult>; + + fn tx_status(&self, tx_id: &TxId) -> StorageResult; + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter>; +} + +/// The on chain database port expected by GraphQL API service. +pub trait OnChainDatabase: Send + Sync + DatabaseBlocks - + DatabaseTransactions + + StorageInspect + DatabaseMessages - + DatabaseCoins + + StorageInspect + DatabaseContracts + DatabaseChain + DatabaseMessageProof @@ -87,33 +113,8 @@ pub trait DatabaseBlocks: fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)>; } -/// Trait that specifies all the getters required for transactions. -pub trait DatabaseTransactions: - StorageInspect - + StorageInspect -{ - fn tx_status(&self, tx_id: &TxId) -> StorageResult; - - fn owned_transactions_ids( - &self, - owner: Address, - start: Option, - direction: IterDirection, - ) -> BoxedIter>; -} - /// Trait that specifies all the getters required for messages. -pub trait DatabaseMessages: - StorageInspect - + StorageInspect -{ - fn owned_message_ids( - &self, - owner: &Address, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult>; - +pub trait DatabaseMessages: StorageInspect { fn all_messages( &self, start_message_id: Option, @@ -125,16 +126,6 @@ pub trait DatabaseMessages: fn message_exists(&self, nonce: &Nonce) -> StorageResult; } -/// Trait that specifies all the getters required for coins. 
-pub trait DatabaseCoins: StorageInspect { - fn owned_coins_ids( - &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult>; -} - /// Trait that specifies all the getters required for contract. pub trait DatabaseContracts: StorageInspect @@ -174,7 +165,7 @@ pub trait TxPoolPort: Send + Sync { } #[async_trait] -pub trait DryRunExecution { +pub trait BlockProducerPort: Send + Sync { async fn dry_run_tx( &self, transaction: Transaction, @@ -183,8 +174,6 @@ pub trait DryRunExecution { ) -> anyhow::Result>; } -pub trait BlockProducerPort: Send + Sync + DryRunExecution {} - #[async_trait::async_trait] pub trait ConsensusModulePort: Send + Sync { async fn manually_produce_blocks( @@ -209,3 +198,51 @@ pub trait DatabaseMessageProof: Send + Sync { pub trait P2pPort: Send + Sync { async fn all_peer_info(&self) -> anyhow::Result>; } + +pub mod worker { + use fuel_core_services::stream::BoxStream; + use fuel_core_storage::{ + tables::Receipts, + transactional::Transactional, + Error as StorageError, + Result as StorageResult, + StorageMutate, + }; + use fuel_core_types::{ + fuel_tx::{ + Address, + Bytes32, + }, + fuel_types::BlockHeight, + services::{ + block_importer::SharedImportResult, + txpool::TransactionStatus, + }, + }; + + pub trait OffChainDatabase: + Send + + Sync + + StorageMutate + + Transactional + { + fn record_tx_id_owner( + &mut self, + owner: &Address, + block_height: BlockHeight, + tx_idx: u16, + tx_id: &Bytes32, + ) -> StorageResult>; + + fn update_tx_status( + &mut self, + id: &Bytes32, + status: TransactionStatus, + ) -> StorageResult>; + } + + pub trait BlockImporter { + /// Returns a stream of imported block. 
+ fn block_events(&self) -> BoxStream; + } +} diff --git a/crates/fuel-core/src/graphql_api/view_extension.rs b/crates/fuel-core/src/graphql_api/view_extension.rs new file mode 100644 index 0000000000..ca482fe987 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/view_extension.rs @@ -0,0 +1,44 @@ +use crate::graphql_api::database::ReadDatabase; +use async_graphql::{ + extensions::{ + Extension, + ExtensionContext, + ExtensionFactory, + NextPrepareRequest, + }, + Request, + ServerResult, +}; +use std::sync::Arc; + +/// The extension that adds the `ReadView` to the request context. +/// It guarantees that the request works with the one view of the database, +/// and external database modification cannot affect the result. +pub(crate) struct ViewExtension; + +impl ViewExtension { + pub fn new() -> Self { + Self + } +} + +impl ExtensionFactory for ViewExtension { + fn create(&self) -> Arc { + Arc::new(ViewExtension::new()) + } +} + +#[async_trait::async_trait] +impl Extension for ViewExtension { + async fn prepare_request( + &self, + ctx: &ExtensionContext<'_>, + request: Request, + next: NextPrepareRequest<'_>, + ) -> ServerResult { + let database: &ReadDatabase = ctx.data_unchecked(); + let view = database.view(); + let request = request.data(view); + next.run(ctx, request).await + } +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs new file mode 100644 index 0000000000..22f5471922 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -0,0 +1,284 @@ +use crate::fuel_core_graphql_api::ports; +use fuel_core_services::{ + stream::BoxStream, + EmptyShared, + RunnableService, + RunnableTask, + ServiceRunner, + StateWatcher, +}; +use fuel_core_storage::{ + tables::Receipts, + Result as StorageResult, + StorageAsMut, +}; +use fuel_core_types::{ + blockchain::block::Block, + fuel_tx::{ + field::{ + Inputs, + Outputs, + }, + input::coin::{ + CoinPredicate, + CoinSigned, + }, + Input, 
+ Output, + Receipt, + Transaction, + TxId, + UniqueIdentifier, + }, + fuel_types::{ + BlockHeight, + Bytes32, + }, + services::{ + block_importer::{ + ImportResult, + SharedImportResult, + }, + executor::TransactionExecutionStatus, + txpool::from_executor_to_status, + }, +}; +use futures::{ + FutureExt, + StreamExt, +}; + +/// The off-chain GraphQL API worker task processes the imported blocks +/// and actualize the information used by the GraphQL service. +pub struct Task { + block_importer: BoxStream, + database: D, +} + +impl Task +where + D: ports::worker::OffChainDatabase, +{ + fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { + // TODO: Implement the creation of indexes for the messages and coins. + // Implement table `BlockId -> BlockHeight` to get the block height by block id. + // https://github.com/FuelLabs/fuel-core/issues/1583 + let mut transaction = self.database.transaction(); + // save the status for every transaction using the finalized block id + self.persist_transaction_status(&result, transaction.as_mut())?; + + // save the associated owner for each transaction in the block + self.index_tx_owners_for_block( + &result.sealed_block.entity, + transaction.as_mut(), + )?; + transaction.commit()?; + + Ok(()) + } + + /// Associate all transactions within a block to their respective UTXO owners + fn index_tx_owners_for_block( + &self, + block: &Block, + block_st_transaction: &mut D, + ) -> anyhow::Result<()> { + for (tx_idx, tx) in block.transactions().iter().enumerate() { + let block_height = *block.header().height(); + let inputs; + let outputs; + let tx_idx = u16::try_from(tx_idx).map_err(|e| { + anyhow::anyhow!("The block has more than `u16::MAX` transactions, {}", e) + })?; + let tx_id = tx.cached_id().expect( + "The imported block should contains only transactions with cached id", + ); + match tx { + Transaction::Script(tx) => { + inputs = tx.inputs().as_slice(); + outputs = tx.outputs().as_slice(); + } + 
Transaction::Create(tx) => { + inputs = tx.inputs().as_slice(); + outputs = tx.outputs().as_slice(); + } + Transaction::Mint(_) => continue, + } + self.persist_owners_index( + block_height, + inputs, + outputs, + &tx_id, + tx_idx, + block_st_transaction, + )?; + } + Ok(()) + } + + /// Index the tx id by owner for all of the inputs and outputs + fn persist_owners_index( + &self, + block_height: BlockHeight, + inputs: &[Input], + outputs: &[Output], + tx_id: &Bytes32, + tx_idx: u16, + db: &mut D, + ) -> StorageResult<()> { + let mut owners = vec![]; + for input in inputs { + if let Input::CoinSigned(CoinSigned { owner, .. }) + | Input::CoinPredicate(CoinPredicate { owner, .. }) = input + { + owners.push(owner); + } + } + + for output in outputs { + match output { + Output::Coin { to, .. } + | Output::Change { to, .. } + | Output::Variable { to, .. } => { + owners.push(to); + } + Output::Contract(_) | Output::ContractCreated { .. } => {} + } + } + + // dedupe owners from inputs and outputs prior to indexing + owners.sort(); + owners.dedup(); + + for owner in owners { + db.record_tx_id_owner(owner, block_height, tx_idx, tx_id)?; + } + + Ok(()) + } + + fn persist_transaction_status( + &self, + import_result: &ImportResult, + db: &mut D, + ) -> StorageResult<()> { + for TransactionExecutionStatus { + id, + result, + receipts, + } in import_result.tx_status.iter() + { + let status = from_executor_to_status( + &import_result.sealed_block.entity, + result.clone(), + ); + + if db.update_tx_status(id, status)?.is_some() { + return Err(anyhow::anyhow!( + "Transaction status already exists for tx {}", + id + ) + .into()); + } + + self.persist_receipts(id, receipts, db)?; + } + Ok(()) + } + + fn persist_receipts( + &self, + tx_id: &TxId, + receipts: &[Receipt], + db: &mut D, + ) -> StorageResult<()> { + if db.storage::().insert(tx_id, receipts)?.is_some() { + return Err(anyhow::anyhow!("Receipts already exist for tx {}", tx_id).into()); + } + Ok(()) + } +} + 
+#[async_trait::async_trait] +impl RunnableService for Task +where + D: ports::worker::OffChainDatabase, +{ + const NAME: &'static str = "GraphQL_Off_Chain_Worker"; + type SharedData = EmptyShared; + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + EmptyShared + } + + async fn into_task( + self, + _: &StateWatcher, + _: Self::TaskParams, + ) -> anyhow::Result { + // TODO: It is possible that the node was shut down before we processed all imported blocks. + // It could lead to some missed blocks and the database's inconsistent state. + // Because the result of block execution is not stored on the chain, it is impossible + // to actualize the database without executing the block at the previous state + // of the blockchain. When `AtomicView::view_at` is implemented, we can + // process all missed blocks and actualize the database here. + // https://github.com/FuelLabs/fuel-core/issues/1584 + Ok(self) + } +} + +#[async_trait::async_trait] +impl RunnableTask for Task +where + D: ports::worker::OffChainDatabase, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> anyhow::Result { + let should_continue; + tokio::select! { + biased; + + _ = watcher.while_started() => { + should_continue = false; + } + + result = self.block_importer.next() => { + if let Some(block) = result { + self.process_block(block)?; + + should_continue = true + } else { + should_continue = false + } + } + } + Ok(should_continue) + } + + async fn shutdown(mut self) -> anyhow::Result<()> { + // Process all remaining blocks before shutdown to not lose any data. 
+ loop { + let result = self.block_importer.next().now_or_never(); + + if let Some(Some(block)) = result { + self.process_block(block)?; + } else { + break; + } + } + Ok(()) + } +} + +pub fn new_service(block_importer: I, database: D) -> ServiceRunner> +where + I: ports::worker::BlockImporter, + D: ports::worker::OffChainDatabase, +{ + let block_importer = block_importer.block_events(); + ServiceRunner::new(Task { + block_importer, + database, + }) +} diff --git a/crates/fuel-core/src/query/balance.rs b/crates/fuel-core/src/query/balance.rs index c597742225..ecbc47620b 100644 --- a/crates/fuel-core/src/query/balance.rs +++ b/crates/fuel-core/src/query/balance.rs @@ -1,4 +1,4 @@ -use crate::fuel_core_graphql_api::service::Database; +use crate::fuel_core_graphql_api::database::ReadView; use asset_query::{ AssetQuery, AssetSpendTarget, @@ -43,7 +43,7 @@ pub trait BalanceQueryData: Send + Sync { ) -> BoxedIter>; } -impl BalanceQueryData for Database { +impl BalanceQueryData for ReadView { fn balance( &self, owner: Address, diff --git a/crates/fuel-core/src/query/balance/asset_query.rs b/crates/fuel-core/src/query/balance/asset_query.rs index e93c9d0f30..ee0266b124 100644 --- a/crates/fuel-core/src/query/balance/asset_query.rs +++ b/crates/fuel-core/src/query/balance/asset_query.rs @@ -1,5 +1,5 @@ use crate::{ - graphql_api::service::Database, + graphql_api::database::ReadView, query::{ CoinQueryData, MessageQueryData, @@ -58,7 +58,7 @@ pub struct AssetsQuery<'a> { pub owner: &'a Address, pub assets: Option>, pub exclude: Option<&'a Exclude>, - pub database: &'a Database, + pub database: &'a ReadView, pub base_asset_id: &'a AssetId, } @@ -67,7 +67,7 @@ impl<'a> AssetsQuery<'a> { owner: &'a Address, assets: Option>, exclude: Option<&'a Exclude>, - database: &'a Database, + database: &'a ReadView, base_asset_id: &'a AssetId, ) -> Self { Self { @@ -171,7 +171,7 @@ pub struct AssetQuery<'a> { pub owner: &'a Address, pub asset: &'a AssetSpendTarget, pub exclude: Option<&'a 
Exclude>, - pub database: &'a Database, + pub database: &'a ReadView, query: AssetsQuery<'a>, } @@ -181,7 +181,7 @@ impl<'a> AssetQuery<'a> { asset: &'a AssetSpendTarget, base_asset_id: &'a AssetId, exclude: Option<&'a Exclude>, - database: &'a Database, + database: &'a ReadView, ) -> Self { let mut allowed = HashSet::new(); allowed.insert(&asset.id); diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 66cba1f941..8aeed56f76 100644 --- a/crates/fuel-core/src/query/block.rs +++ b/crates/fuel-core/src/query/block.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use fuel_core_storage::{ iter::{ BoxedIter, @@ -26,7 +26,7 @@ pub trait SimpleBlockData: Send + Sync { fn block(&self, id: &BlockId) -> StorageResult; } -impl SimpleBlockData for D { +impl SimpleBlockData for D { fn block(&self, id: &BlockId) -> StorageResult { let block = self .storage::() @@ -56,7 +56,7 @@ pub trait BlockQueryData: Send + Sync + SimpleBlockData { fn consensus(&self, id: &BlockId) -> StorageResult; } -impl BlockQueryData for D { +impl BlockQueryData for D { fn block_id(&self, height: &BlockHeight) -> StorageResult { self.block_id(height) } diff --git a/crates/fuel-core/src/query/chain.rs b/crates/fuel-core/src/query/chain.rs index 88ce035ba1..b9408ddfcd 100644 --- a/crates/fuel-core/src/query/chain.rs +++ b/crates/fuel-core/src/query/chain.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use fuel_core_storage::Result as StorageResult; use fuel_core_types::blockchain::primitives::DaBlockHeight; @@ -8,7 +8,7 @@ pub trait ChainQueryData: Send + Sync { fn da_height(&self) -> StorageResult; } -impl ChainQueryData for D { +impl ChainQueryData for D { fn name(&self) -> StorageResult { self.chain_name() } diff --git a/crates/fuel-core/src/query/coin.rs b/crates/fuel-core/src/query/coin.rs index 
d31b60690e..171a88168b 100644 --- a/crates/fuel-core/src/query/coin.rs +++ b/crates/fuel-core/src/query/coin.rs @@ -1,4 +1,7 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::{ + OffChainDatabase, + OnChainDatabase, +}; use fuel_core_storage::{ iter::{ BoxedIter, @@ -34,7 +37,7 @@ pub trait CoinQueryData: Send + Sync { ) -> BoxedIter>; } -impl CoinQueryData for D { +impl CoinQueryData for D { fn coin(&self, utxo_id: UtxoId) -> StorageResult { let coin = self .storage::() diff --git a/crates/fuel-core/src/query/contract.rs b/crates/fuel-core/src/query/contract.rs index d05d90999b..d4bbb8b5d6 100644 --- a/crates/fuel-core/src/query/contract.rs +++ b/crates/fuel-core/src/query/contract.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use fuel_core_storage::{ iter::{ BoxedIter, @@ -43,7 +43,7 @@ pub trait ContractQueryData: Send + Sync { ) -> BoxedIter>; } -impl ContractQueryData for D { +impl ContractQueryData for D { fn contract_id(&self, id: ContractId) -> StorageResult { let contract_exists = self.storage::().contains_key(&id)?; if contract_exists { diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index b1ce17e4bb..334c24dc0d 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -3,7 +3,8 @@ use crate::{ ports::{ DatabaseMessageProof, DatabaseMessages, - DatabasePort, + OffChainDatabase, + OnChainDatabase, }, IntoApiResult, }, @@ -80,7 +81,7 @@ pub trait MessageQueryData: Send + Sync { ) -> BoxedIter>; } -impl MessageQueryData for D { +impl MessageQueryData for D { fn message(&self, id: &Nonce) -> StorageResult { self.storage::() .get(id)? 
@@ -128,7 +129,10 @@ pub trait MessageProofData: ) -> StorageResult; } -impl MessageProofData for D { +impl MessageProofData for D +where + D: OnChainDatabase + OffChainDatabase + ?Sized, +{ fn transaction_status( &self, transaction_id: &TxId, diff --git a/crates/fuel-core/src/query/tx.rs b/crates/fuel-core/src/query/tx.rs index 74d325e33a..ebc2531f27 100644 --- a/crates/fuel-core/src/query/tx.rs +++ b/crates/fuel-core/src/query/tx.rs @@ -1,4 +1,7 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::{ + OffChainDatabase, + OnChainDatabase, +}; use fuel_core_storage::{ iter::{ BoxedIter, @@ -32,7 +35,10 @@ pub trait SimpleTransactionData: Send + Sync { fn transaction(&self, transaction_id: &TxId) -> StorageResult; } -impl SimpleTransactionData for D { +impl SimpleTransactionData for D +where + D: OnChainDatabase + OffChainDatabase + ?Sized, +{ fn transaction(&self, tx_id: &TxId) -> StorageResult { self.storage::() .get(tx_id) @@ -57,7 +63,10 @@ pub trait TransactionQueryData: Send + Sync + SimpleTransactionData { ) -> BoxedIter>; } -impl TransactionQueryData for D { +impl TransactionQueryData for D +where + D: OnChainDatabase + OffChainDatabase + ?Sized, +{ fn status(&self, tx_id: &TxId) -> StorageResult { self.tx_status(tx_id) } diff --git a/crates/fuel-core/src/schema/balance.rs b/crates/fuel-core/src/schema/balance.rs index 9188696a89..da5a72ada5 100644 --- a/crates/fuel-core/src/schema/balance.rs +++ b/crates/fuel-core/src/schema/balance.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, Config, }, query::BalanceQueryData, @@ -56,12 +56,12 @@ impl BalanceQuery { #[graphql(desc = "address of the owner")] owner: Address, #[graphql(desc = "asset_id of the coin")] asset_id: AssetId, ) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let base_asset_id = *ctx .data_unchecked::() .consensus_parameters 
.base_asset_id(); - let balance = data.balance(owner.0, asset_id.0, base_asset_id)?.into(); + let balance = query.balance(owner.0, asset_id.0, base_asset_id)?.into(); Ok(balance) } @@ -82,7 +82,7 @@ impl BalanceQuery { if before.is_some() || after.is_some() { return Err(anyhow!("pagination is not yet supported").into()) } - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |_, direction| { let owner = filter.owner.into(); let base_asset_id = *ctx diff --git a/crates/fuel-core/src/schema/block.rs b/crates/fuel-core/src/schema/block.rs index 234513310b..13a2b98644 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -4,13 +4,11 @@ use super::scalars::{ }; use crate::{ fuel_core_graphql_api::{ - service::{ - ConsensusModule, - Database, - }, + api_service::ConsensusModule, + database::ReadView, Config as GraphQLConfig, + IntoApiResult, }, - graphql_api::IntoApiResult, query::{ BlockQueryData, SimpleBlockData, @@ -122,7 +120,7 @@ impl Block { &self, ctx: &Context<'_>, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); self.0 .transactions() .iter() @@ -207,7 +205,7 @@ impl BlockQuery { #[graphql(desc = "ID of the block")] id: Option, #[graphql(desc = "Height of the block")] height: Option, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let id = match (id, height) { (Some(_), Some(_)) => { return Err(async_graphql::Error::new( @@ -217,14 +215,14 @@ impl BlockQuery { (Some(id), None) => Ok(id.0.into()), (None, Some(height)) => { let height: u32 = height.into(); - data.block_id(&height.into()) + query.block_id(&height.into()) } (None, None) => { return Err(async_graphql::Error::new("Missing either id or height")) } }; - id.and_then(|id| data.block(&id)).into_api_result() + 
id.and_then(|id| query.block(&id)).into_api_result() } async fn blocks( @@ -235,9 +233,9 @@ impl BlockQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let db: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { - Ok(blocks_query(db, start.map(Into::into), direction)) + Ok(blocks_query(query, start.map(Into::into), direction)) }) .await } @@ -268,16 +266,16 @@ impl HeaderQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let db: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { - Ok(blocks_query(db, start.map(Into::into), direction)) + Ok(blocks_query(query, start.map(Into::into), direction)) }) .await } } fn blocks_query( - query: &Database, + query: &ReadView, start: Option, direction: IterDirection, ) -> BoxedIter> @@ -307,7 +305,7 @@ impl BlockMutation { start_timestamp: Option, blocks_to_produce: U32, ) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let consensus_module = ctx.data_unchecked::(); let config = ctx.data_unchecked::().clone(); diff --git a/crates/fuel-core/src/schema/chain.rs b/crates/fuel-core/src/schema/chain.rs index e1df56c7eb..7c8bb918aa 100644 --- a/crates/fuel-core/src/schema/chain.rs +++ b/crates/fuel-core/src/schema/chain.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, Config as GraphQLConfig, }, query::{ @@ -683,19 +683,19 @@ impl HeavyOperation { #[Object] impl ChainInfo { async fn name(&self, ctx: &Context<'_>) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); - Ok(data.name()?) + let query: &ReadView = ctx.data_unchecked(); + Ok(query.name()?) 
} async fn latest_block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let latest_block = query.latest_block()?.into(); Ok(latest_block) } async fn da_height(&self, ctx: &Context<'_>) -> U64 { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let height = query .da_height() diff --git a/crates/fuel-core/src/schema/coins.rs b/crates/fuel-core/src/schema/coins.rs index 60a75add8f..476058016b 100644 --- a/crates/fuel-core/src/schema/coins.rs +++ b/crates/fuel-core/src/schema/coins.rs @@ -4,10 +4,10 @@ use crate::{ SpendQuery, }, fuel_core_graphql_api::{ + database::ReadView, Config as GraphQLConfig, IntoApiResult, }, - graphql_api::service::Database, query::{ asset_query::AssetSpendTarget, CoinQueryData, @@ -152,8 +152,8 @@ impl CoinQuery { ctx: &Context<'_>, #[graphql(desc = "The ID of the coin")] utxo_id: UtxoId, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); - data.coin(utxo_id.0).into_api_result() + let query: &ReadView = ctx.data_unchecked(); + query.coin(utxo_id.0).into_api_result() } /// Gets all unspent coins of some `owner` maybe filtered with by `asset_id` per page. @@ -166,7 +166,7 @@ impl CoinQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { let owner: fuel_tx::Address = filter.owner.into(); let coins = query @@ -240,9 +240,9 @@ impl CoinQuery { let spend_query = SpendQuery::new(owner, &query_per_asset, excluded_ids, *base_asset_id)?; - let db = ctx.data_unchecked::(); + let query: &ReadView = ctx.data_unchecked(); - let coins = random_improve(db, &spend_query)? + let coins = random_improve(query, &spend_query)? 
.into_iter() .map(|coins| { coins diff --git a/crates/fuel-core/src/schema/contract.rs b/crates/fuel-core/src/schema/contract.rs index 2409041925..16a26b8770 100644 --- a/crates/fuel-core/src/schema/contract.rs +++ b/crates/fuel-core/src/schema/contract.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, IntoApiResult, }, query::ContractQueryData, @@ -41,16 +41,16 @@ impl Contract { } async fn bytecode(&self, ctx: &Context<'_>) -> async_graphql::Result { - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_bytecode(self.0) .map(HexString) .map_err(Into::into) } async fn salt(&self, ctx: &Context<'_>) -> async_graphql::Result { - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_salt(self.0) .map(Into::into) .map_err(Into::into) @@ -67,8 +67,8 @@ impl ContractQuery { ctx: &Context<'_>, #[graphql(desc = "ID of the Contract")] id: ContractId, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); - data.contract_id(id.0).into_api_result() + let query: &ReadView = ctx.data_unchecked(); + query.contract_id(id.0).into_api_result() } } @@ -108,8 +108,8 @@ impl ContractBalanceQuery { ) -> async_graphql::Result { let contract_id = contract.into(); let asset_id = asset.into(); - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_balance(contract_id, asset_id) .into_api_result() .map(|result| { @@ -135,7 +135,7 @@ impl ContractBalanceQuery { ) -> async_graphql::Result< Connection, > { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { let balances = query diff --git a/crates/fuel-core/src/schema/message.rs b/crates/fuel-core/src/schema/message.rs index 75707190e2..dfc1760686 
100644 --- a/crates/fuel-core/src/schema/message.rs +++ b/crates/fuel-core/src/schema/message.rs @@ -1,5 +1,3 @@ -use std::ops::Deref; - use super::{ block::Header, scalars::{ @@ -12,7 +10,10 @@ use super::{ }, }; use crate::{ - fuel_core_graphql_api::service::Database, + fuel_core_graphql_api::{ + database::ReadView, + ports::DatabaseBlocks, + }, query::MessageQueryData, schema::scalars::{ BlockId, @@ -75,7 +76,7 @@ impl MessageQuery { before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination( after, before, @@ -114,12 +115,12 @@ impl MessageQuery { commit_block_id: Option, commit_block_height: Option, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block_id = match (commit_block_id, commit_block_height) { (Some(commit_block_id), None) => commit_block_id.0.into(), (None, Some(commit_block_height)) => { let block_height = commit_block_height.0.into(); - data.block_id(&block_height)? + query.block_id(&block_height)? 
} _ => Err(anyhow::anyhow!( "Either `commit_block_id` or `commit_block_height` must be provided exclusively" @@ -127,7 +128,7 @@ impl MessageQuery { }; Ok(crate::query::message_proof( - data.deref(), + query, transaction_id.into(), nonce.into(), block_id, @@ -140,8 +141,8 @@ impl MessageQuery { ctx: &Context<'_>, nonce: Nonce, ) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); - let status = crate::query::message_status(data.deref(), nonce.into())?; + let query: &ReadView = ctx.data_unchecked(); + let status = crate::query::message_status(query, nonce.into())?; Ok(status.into()) } } diff --git a/crates/fuel-core/src/schema/node_info.rs b/crates/fuel-core/src/schema/node_info.rs index 97ef85167c..647b0c4215 100644 --- a/crates/fuel-core/src/schema/node_info.rs +++ b/crates/fuel-core/src/schema/node_info.rs @@ -47,7 +47,7 @@ impl NodeInfo { async fn peers(&self, _ctx: &Context<'_>) -> async_graphql::Result> { #[cfg(feature = "p2p")] { - let p2p: &crate::fuel_core_graphql_api::service::P2pService = + let p2p: &crate::fuel_core_graphql_api::api_service::P2pService = _ctx.data_unchecked(); let peer_info = p2p.all_peer_info().await?; let peers = peer_info.into_iter().map(PeerInfo).collect(); diff --git a/crates/fuel-core/src/schema/tx.rs b/crates/fuel-core/src/schema/tx.rs index 0d772b8685..19a8599b10 100644 --- a/crates/fuel-core/src/schema/tx.rs +++ b/crates/fuel-core/src/schema/tx.rs @@ -1,25 +1,29 @@ use crate::{ fuel_core_graphql_api::{ - service::{ + api_service::{ BlockProducer, - Database, TxPool, }, + database::ReadView, + ports::OffChainDatabase, + Config, IntoApiResult, }, - graphql_api::Config, query::{ transaction_status_change, BlockQueryData, SimpleTransactionData, TransactionQueryData, }, - schema::scalars::{ - Address, - HexString, - SortedTxCursor, - TransactionId, - TxPointer, + schema::{ + scalars::{ + Address, + HexString, + SortedTxCursor, + TransactionId, + TxPointer, + }, + tx::types::TransactionStatus, }, }; use 
async_graphql::{ @@ -48,7 +52,10 @@ use fuel_core_types::{ }, fuel_types, fuel_types::canonical::Deserialize, - fuel_vm::checked_transaction::EstimatePredicates, + fuel_vm::checked_transaction::{ + CheckPredicateParams, + EstimatePredicates, + }, services::txpool, }; use futures::{ @@ -63,9 +70,6 @@ use std::{ use tokio_stream::StreamExt; use types::Transaction; -use self::types::TransactionStatus; -use fuel_core_types::fuel_vm::checked_transaction::CheckPredicateParams; - pub mod input; pub mod output; pub mod receipt; @@ -81,7 +85,7 @@ impl TxQuery { ctx: &Context<'_>, #[graphql(desc = "The ID of the transaction")] id: TransactionId, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let id = id.0; let txpool = ctx.data_unchecked::(); @@ -105,8 +109,7 @@ impl TxQuery { ) -> async_graphql::Result< Connection, > { - let db_query: &Database = ctx.data_unchecked(); - let tx_query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination( after, before, @@ -115,7 +118,7 @@ impl TxQuery { |start: &Option, direction| { let start = *start; let block_id = start.map(|sorted| sorted.block_height); - let all_block_ids = db_query.compressed_blocks(block_id, direction); + let all_block_ids = query.compressed_blocks(block_id, direction); let all_txs = all_block_ids .map(move |block| { @@ -145,7 +148,7 @@ impl TxQuery { }); let all_txs = all_txs.map(|result: StorageResult| { result.and_then(|sorted| { - let tx = tx_query.transaction(&sorted.tx_id.0)?; + let tx = query.transaction(&sorted.tx_id.0)?; Ok((sorted, Transaction::from_tx(sorted.tx_id.0, tx))) }) @@ -167,7 +170,7 @@ impl TxQuery { before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let config = ctx.data_unchecked::(); let owner = fuel_types::Address::from(owner); @@ -298,11 +301,11 @@ impl 
TxStatusSubscription { ) -> anyhow::Result> + 'a> { let txpool = ctx.data_unchecked::(); - let db = ctx.data_unchecked::(); + let query: &ReadView = ctx.data_unchecked(); let rx = txpool.tx_update_subscribe(id.into())?; Ok(transaction_status_change( - move |id| match db.tx_status(&id) { + move |id| match query.tx_status(&id) { Ok(status) => Ok(Some(status)), Err(StorageError::NotFound(_, _)) => Ok(txpool .submission_time(id) diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 41b06f5cb3..fcd0e110ff 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -5,10 +5,8 @@ use super::{ }; use crate::{ fuel_core_graphql_api::{ - service::{ - Database, - TxPool, - }, + api_service::TxPool, + database::ReadView, Config, IntoApiResult, }, @@ -160,7 +158,7 @@ impl SuccessStatus { } async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block = query.block(&self.block_id)?; Ok(block.into()) } @@ -174,8 +172,8 @@ impl SuccessStatus { } async fn receipts(&self, ctx: &Context<'_>) -> async_graphql::Result> { - let db = ctx.data_unchecked::(); - let receipts = db + let query: &ReadView = ctx.data_unchecked(); + let receipts = query .receipts(&self.tx_id) .unwrap_or_default() .into_iter() @@ -201,7 +199,7 @@ impl FailureStatus { } async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block = query.block(&self.block_id)?; Ok(block.into()) } @@ -219,8 +217,8 @@ impl FailureStatus { } async fn receipts(&self, ctx: &Context<'_>) -> async_graphql::Result> { - let db = ctx.data_unchecked::(); - let receipts = db + let query: &ReadView = ctx.data_unchecked(); + let receipts = query .receipts(&self.tx_id) .unwrap_or_default() .into_iter() @@ -526,7 +524,7 @@ impl Transaction { 
ctx: &Context<'_>, ) -> async_graphql::Result> { let id = self.1; - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let txpool = ctx.data_unchecked::(); get_tx_status(id, query, txpool).map_err(Into::into) } @@ -535,7 +533,7 @@ impl Transaction { &self, ctx: &Context<'_>, ) -> async_graphql::Result>> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let receipts = query .receipts(&self.1) .into_api_result::, async_graphql::Error>()?; @@ -622,7 +620,7 @@ impl Transaction { #[tracing::instrument(level = "debug", skip(query, txpool), ret, err)] pub(crate) fn get_tx_status( id: fuel_core_types::fuel_types::Bytes32, - query: &Database, + query: &ReadView, txpool: &TxPool, ) -> Result, StorageError> { match query diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 3d5240cab2..7fee7ddbe1 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -44,7 +44,7 @@ pub struct SharedState { /// The Relayer shared state. pub relayer: Option>, /// The GraphQL shared state. - pub graph_ql: crate::fuel_core_graphql_api::service::SharedState, + pub graph_ql: crate::fuel_core_graphql_api::api_service::SharedState, /// The underlying database. pub database: Database, /// Subscribe to new block production. @@ -122,7 +122,7 @@ impl FuelService { } #[cfg(feature = "relayer")] - /// Wait for the [`Relayer`] to be in sync with + /// Wait for the Relayer to be in sync with /// the data availability layer. 
/// /// Yields until the relayer reaches a point where it @@ -187,7 +187,7 @@ pub struct Task { impl Task { /// Private inner method for initializing the fuel service task - pub fn new(database: Database, config: Config) -> anyhow::Result { + pub fn new(mut database: Database, config: Config) -> anyhow::Result { // initialize state tracing::info!("Initializing database"); database.init(&config.chain_conf)?; @@ -305,9 +305,9 @@ mod tests { i += 1; } - // current services: graphql, txpool, PoA + // current services: graphql, graphql worker, txpool, PoA #[allow(unused_mut)] - let mut expected_services = 3; + let mut expected_services = 4; // Relayer service is disabled with `Config::local_node`. // #[cfg(feature = "relayer")] diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 89627483c8..7fdfb2c303 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -70,11 +70,7 @@ impl BlockImporterAdapter { &self, sealed_block: SealedBlock, ) -> anyhow::Result<()> { - tokio::task::spawn_blocking({ - let importer = self.block_importer.clone(); - move || importer.execute_and_commit(sealed_block) - }) - .await??; + self.block_importer.execute_and_commit(sealed_block).await?; Ok(()) } } diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index ac446c7167..9e57c2cf0e 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -1,5 +1,3 @@ -use std::ops::Deref; - use crate::{ database::Database, fuel_core_graphql_api::ports::ConsensusModulePort, @@ -124,15 +122,17 @@ impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { } } +#[async_trait::async_trait] impl BlockImporter for BlockImporterAdapter { type Database = Database; - fn commit_result( + 
async fn commit_result( &self, result: UncommittedImporterResult>, ) -> anyhow::Result<()> { self.block_importer .commit_result(result) + .await .map_err(Into::into) } @@ -140,7 +140,7 @@ impl BlockImporter for BlockImporterAdapter { Box::pin( BroadcastStream::new(self.block_importer.subscribe()) .filter_map(|result| result.ok()) - .map(|r| r.deref().into()), + .map(BlockImportInfo::from), ) } } diff --git a/crates/fuel-core/src/service/adapters/executor.rs b/crates/fuel-core/src/service/adapters/executor.rs index bb8e46042d..dbeece6c73 100644 --- a/crates/fuel-core/src/service/adapters/executor.rs +++ b/crates/fuel-core/src/service/adapters/executor.rs @@ -16,26 +16,19 @@ use fuel_core_executor::{ use fuel_core_storage::{ transactional::StorageTransaction, Error as StorageError, - Result as StorageResult, }; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, entities::message::Message, fuel_tx, fuel_tx::Receipt, - fuel_types::{ - Address, - BlockHeight, - Bytes32, - Nonce, - }, + fuel_types::Nonce, services::{ block_producer::Components, executor::{ Result as ExecutorResult, UncommittedResult, }, - txpool::TransactionStatus, }, }; @@ -84,36 +77,6 @@ impl fuel_core_executor::refs::ContractStorageTrait for Database { type InnerError = StorageError; } -impl fuel_core_executor::ports::MessageIsSpent for Database { - type Error = StorageError; - - fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { - self.message_is_spent(nonce) - } -} - -impl fuel_core_executor::ports::TxIdOwnerRecorder for Database { - type Error = StorageError; - - fn record_tx_id_owner( - &self, - owner: &Address, - block_height: BlockHeight, - tx_idx: u16, - tx_id: &Bytes32, - ) -> Result, Self::Error> { - self.record_tx_id_owner(owner, block_height, tx_idx, tx_id) - } - - fn update_tx_status( - &self, - id: &Bytes32, - status: TransactionStatus, - ) -> Result, Self::Error> { - self.update_tx_status(id, status) - } -} - impl fuel_core_executor::ports::ExecutorDatabaseTrait for 
Database {} impl fuel_core_executor::ports::RelayerPort for MaybeRelayerAdapter { diff --git a/crates/fuel-core/src/service/adapters/graphql_api.rs b/crates/fuel-core/src/service/adapters/graphql_api.rs index 4faea60040..e83efc44e0 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api.rs @@ -1,20 +1,13 @@ -use super::BlockProducerAdapter; +use super::{ + BlockImporterAdapter, + BlockProducerAdapter, +}; use crate::{ - database::{ - transactions::OwnedTransactionIndexCursor, - Database, - }, + database::Database, fuel_core_graphql_api::ports::{ + worker, BlockProducerPort, - DatabaseBlocks, - DatabaseChain, - DatabaseCoins, - DatabaseContracts, DatabaseMessageProof, - DatabaseMessages, - DatabasePort, - DatabaseTransactions, - DryRunExecution, P2pPort, TxPoolPort, }, @@ -25,51 +18,22 @@ use crate::{ }; use async_trait::async_trait; use fuel_core_services::stream::BoxStream; -use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, - not_found, - Error as StorageError, - Result as StorageResult, -}; +use fuel_core_storage::Result as StorageResult; use fuel_core_txpool::{ service::TxStatusMessage, - types::{ - ContractId, - TxId, - }, + types::TxId, }; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, - }, - entities::message::{ - MerkleProof, - Message, - }, + entities::message::MerkleProof, fuel_tx::{ - Address, - AssetId, Receipt as TxReceipt, Transaction, - TxPointer, - UtxoId, - }, - fuel_types::{ - BlockHeight, - Nonce, }, + fuel_types::BlockHeight, services::{ - graphql_api::ContractBalance, + block_importer::SharedImportResult, p2p::PeerInfo, - txpool::{ - InsertionResult, - TransactionStatus, - }, + txpool::InsertionResult, }, tai64::Tai64, }; @@ -78,140 +42,8 @@ use std::{ sync::Arc, }; -impl DatabaseBlocks for Database { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.get_block_id(height) - .and_then(|height| 
height.ok_or(not_found!("BlockId"))) - } - - fn blocks_ids( - &self, - start: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { - self.all_block_ids(start, direction) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.ids_of_latest_block() - .transpose() - .ok_or(not_found!("BlockId"))? - } -} - -impl DatabaseTransactions for Database { - fn tx_status(&self, tx_id: &TxId) -> StorageResult { - self.get_tx_status(tx_id) - .transpose() - .ok_or(not_found!("TransactionId"))? - } - - fn owned_transactions_ids( - &self, - owner: Address, - start: Option, - direction: IterDirection, - ) -> BoxedIter> { - let start = start.map(|tx_pointer| OwnedTransactionIndexCursor { - block_height: tx_pointer.block_height(), - tx_idx: tx_pointer.tx_index(), - }); - self.owned_transactions(owner, start, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } -} - -impl DatabaseMessages for Database { - fn owned_message_ids( - &self, - owner: &Address, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.owned_message_ids(owner, start_message_id, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn all_messages( - &self, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.all_messages(start_message_id, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { - self.message_is_spent(nonce) - } - - fn message_exists(&self, nonce: &Nonce) -> StorageResult { - self.message_exists(nonce) - } -} - -impl DatabaseCoins for Database { - fn owned_coins_ids( - &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - 
self.owned_coins_ids(owner, start_coin, Some(direction)) - .map(|res| res.map_err(StorageError::from)) - .into_boxed() - } -} - -impl DatabaseContracts for Database { - fn contract_balances( - &self, - contract: ContractId, - start_asset: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.contract_balances(contract, start_asset, Some(direction)) - .map(move |result| { - result - .map_err(StorageError::from) - .map(|(asset_id, amount)| ContractBalance { - owner: contract, - amount, - asset_id, - }) - }) - .into_boxed() - } -} - -impl DatabaseChain for Database { - fn chain_name(&self) -> StorageResult { - pub const DEFAULT_NAME: &str = "Fuel.testnet"; - - Ok(self - .get_chain_name()? - .unwrap_or_else(|| DEFAULT_NAME.to_string())) - } - - fn da_height(&self) -> StorageResult { - #[cfg(feature = "relayer")] - { - use fuel_core_relayer::ports::RelayerDb; - self.get_finalized_da_height() - } - #[cfg(not(feature = "relayer"))] - { - Ok(0u64.into()) - } - } -} - -impl DatabasePort for Database {} +mod off_chain; +mod on_chain; #[async_trait] impl TxPoolPort for TxPoolAdapter { @@ -253,7 +85,7 @@ impl DatabaseMessageProof for Database { } #[async_trait] -impl DryRunExecution for BlockProducerAdapter { +impl BlockProducerPort for BlockProducerAdapter { async fn dry_run_tx( &self, transaction: Transaction, @@ -266,8 +98,6 @@ impl DryRunExecution for BlockProducerAdapter { } } -impl BlockProducerPort for BlockProducerAdapter {} - #[async_trait::async_trait] impl P2pPort for P2PAdapter { async fn all_peer_info(&self) -> anyhow::Result> { @@ -305,3 +135,13 @@ impl P2pPort for P2PAdapter { } } } + +impl worker::BlockImporter for BlockImporterAdapter { + fn block_events(&self) -> BoxStream { + use futures::StreamExt; + fuel_core_services::stream::IntoBoxStream::into_boxed( + tokio_stream::wrappers::BroadcastStream::new(self.block_importer.subscribe()) + .filter_map(|r| futures::future::ready(r.ok())), + ) + } +} diff --git 
a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs new file mode 100644 index 0000000000..86fc7002a0 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -0,0 +1,117 @@ +use crate::{ + database::{ + transactions::OwnedTransactionIndexCursor, + Database, + }, + fuel_core_graphql_api::{ + database::OffChainView, + ports::{ + worker, + OffChainDatabase, + }, + }, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IntoBoxedIter, + IterDirection, + }, + not_found, + transactional::AtomicView, + Error as StorageError, + Result as StorageResult, +}; +use fuel_core_txpool::types::TxId; +use fuel_core_types::{ + fuel_tx::{ + Address, + Bytes32, + TxPointer, + UtxoId, + }, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::txpool::TransactionStatus, +}; +use std::sync::Arc; + +impl OffChainDatabase for Database { + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.owned_message_ids(owner, start_message_id, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.owned_coins_ids(owner, start_coin, Some(direction)) + .map(|res| res.map_err(StorageError::from)) + .into_boxed() + } + + fn tx_status(&self, tx_id: &TxId) -> StorageResult { + self.get_tx_status(tx_id) + .transpose() + .ok_or(not_found!("TransactionId"))? 
+ } + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter> { + let start = start.map(|tx_pointer| OwnedTransactionIndexCursor { + block_height: tx_pointer.block_height(), + tx_idx: tx_pointer.tx_index(), + }); + self.owned_transactions(owner, start, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } +} + +impl AtomicView for Database { + fn view_at(&self, _: BlockHeight) -> StorageResult { + unimplemented!( + "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" + ) + } + + fn latest_view(&self) -> OffChainView { + // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 + Arc::new(self.clone()) + } +} + +impl worker::OffChainDatabase for Database { + fn record_tx_id_owner( + &mut self, + owner: &Address, + block_height: BlockHeight, + tx_idx: u16, + tx_id: &Bytes32, + ) -> StorageResult> { + Database::record_tx_id_owner(self, owner, block_height, tx_idx, tx_id) + } + + fn update_tx_status( + &mut self, + id: &Bytes32, + status: TransactionStatus, + ) -> StorageResult> { + Database::update_tx_status(self, id, status) + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs new file mode 100644 index 0000000000..dd9c9937ff --- /dev/null +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -0,0 +1,140 @@ +use crate::{ + database::Database, + fuel_core_graphql_api::{ + database::OnChainView, + ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessages, + OnChainDatabase, + }, + }, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IntoBoxedIter, + IterDirection, + }, + not_found, + transactional::AtomicView, + Error as StorageError, + Result as StorageResult, +}; +use fuel_core_txpool::types::ContractId; +use fuel_core_types::{ + blockchain::primitives::{ + BlockId, + DaBlockHeight, + }, + 
entities::message::Message, + fuel_tx::AssetId, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::graphql_api::ContractBalance, +}; +use std::sync::Arc; + +impl DatabaseBlocks for Database { + fn block_id(&self, height: &BlockHeight) -> StorageResult { + self.get_block_id(height) + .and_then(|height| height.ok_or(not_found!("BlockId"))) + } + + fn blocks_ids( + &self, + start: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { + self.all_block_ids(start, direction) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { + self.ids_of_latest_block() + .transpose() + .ok_or(not_found!("BlockId"))? + } +} + +impl DatabaseMessages for Database { + fn all_messages( + &self, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.all_messages(start_message_id, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { + self.message_is_spent(nonce) + } + + fn message_exists(&self, nonce: &Nonce) -> StorageResult { + self.message_exists(nonce) + } +} + +impl DatabaseContracts for Database { + fn contract_balances( + &self, + contract: ContractId, + start_asset: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.contract_balances(contract, start_asset, Some(direction)) + .map(move |result| { + result + .map_err(StorageError::from) + .map(|(asset_id, amount)| ContractBalance { + owner: contract, + amount, + asset_id, + }) + }) + .into_boxed() + } +} + +impl DatabaseChain for Database { + fn chain_name(&self) -> StorageResult { + pub const DEFAULT_NAME: &str = "Fuel.testnet"; + + Ok(self + .get_chain_name()? 
+ .unwrap_or_else(|| DEFAULT_NAME.to_string())) + } + + fn da_height(&self) -> StorageResult { + #[cfg(feature = "relayer")] + { + use fuel_core_relayer::ports::RelayerDb; + self.get_finalized_da_height() + } + #[cfg(not(feature = "relayer"))] + { + Ok(0u64.into()) + } + } +} + +impl OnChainDatabase for Database {} + +impl AtomicView for Database { + fn view_at(&self, _: BlockHeight) -> StorageResult { + unimplemented!( + "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" + ) + } + + fn latest_view(&self) -> OnChainView { + // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 + Arc::new(self.clone()) + } +} diff --git a/crates/fuel-core/src/service/adapters/txpool.rs b/crates/fuel-core/src/service/adapters/txpool.rs index 6f1593f6d7..ccd33474df 100644 --- a/crates/fuel-core/src/service/adapters/txpool.rs +++ b/crates/fuel-core/src/service/adapters/txpool.rs @@ -7,7 +7,6 @@ use crate::{ }; use fuel_core_services::stream::BoxStream; use fuel_core_storage::{ - not_found, tables::{ Coins, ContractsRawCode, @@ -33,7 +32,7 @@ use fuel_core_types::{ Nonce, }, services::{ - block_importer::ImportResult, + block_importer::SharedImportResult, p2p::{ GossipsubMessageAcceptance, GossipsubMessageInfo, @@ -44,7 +43,7 @@ use fuel_core_types::{ use std::sync::Arc; impl BlockImporter for BlockImporterAdapter { - fn block_events(&self) -> BoxStream> { + fn block_events(&self) -> BoxStream { use tokio_stream::{ wrappers::BroadcastStream, StreamExt, @@ -144,13 +143,4 @@ impl fuel_core_txpool::ports::TxPoolDb for Database { fn current_block_height(&self) -> StorageResult { self.latest_height() } - - fn transaction_status( - &self, - tx_id: &fuel_core_types::fuel_types::Bytes32, - ) -> StorageResult { - self.get_tx_status(tx_id) - .transpose() - .ok_or(not_found!("TransactionId"))? 
- } } diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index 8039f438d1..9942df0a81 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -136,7 +136,8 @@ fn import_genesis_block( (), (), ); - importer.commit_result(UncommittedImportResult::new( + // We commit the Genesis block before the start of any service, so there are no listeners. + importer.commit_result_without_awaiting_listeners(UncommittedImportResult::new( ImportResult::new_from_local(block, vec![]), database_transaction, ))?; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 1523fe41c1..ba8dc05e93 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -3,6 +3,7 @@ use super::adapters::P2PAdapter; use crate::{ database::Database, + fuel_core_graphql_api, fuel_core_graphql_api::Config as GraphQLConfig, schema::build_schema, service::{ @@ -41,7 +42,7 @@ pub type BlockProducerService = fuel_core_producer::block_producer::Producer< TxPoolAdapter, ExecutorAdapter, >; -pub type GraphQL = crate::fuel_core_graphql_api::service::Service; +pub type GraphQL = crate::fuel_core_graphql_api::api_service::Service; pub fn init_sub_services( config: &Config, @@ -189,20 +190,28 @@ pub fn init_sub_services( ) .data(database.clone()); - let graph_ql = crate::fuel_core_graphql_api::service::new_service( - GraphQLConfig { - addr: config.addr, - utxo_validation: config.utxo_validation, - debug: config.debug, - vm_backtrace: config.vm.backtrace, - min_gas_price: config.txpool.min_gas_price, - max_tx: config.txpool.max_tx, - max_depth: config.txpool.max_depth, - consensus_parameters: config.chain_conf.consensus_parameters.clone(), - consensus_key: config.consensus_key.clone(), - }, + let graphql_worker = fuel_core_graphql_api::worker_service::new_service( + importer_adapter.clone(), + database.clone(), + ); + + let graphql_config 
= GraphQLConfig { + addr: config.addr, + utxo_validation: config.utxo_validation, + debug: config.debug, + vm_backtrace: config.vm.backtrace, + min_gas_price: config.txpool.min_gas_price, + max_tx: config.txpool.max_tx, + max_depth: config.txpool.max_depth, + consensus_parameters: config.chain_conf.consensus_parameters.clone(), + consensus_key: config.consensus_key.clone(), + }; + + let graph_ql = fuel_core_graphql_api::api_service::new_service( + graphql_config, schema, - Box::new(database.clone()), + database.clone(), + database.clone(), Box::new(tx_pool_adapter), Box::new(producer_adapter), Box::new(poa_adapter.clone()), @@ -249,5 +258,7 @@ pub fn init_sub_services( } } + services.push(Box::new(graphql_worker)); + Ok((services, shared)) } diff --git a/crates/fuel-core/src/state.rs b/crates/fuel-core/src/state.rs index 49ca2b7a73..83c93851df 100644 --- a/crates/fuel-core/src/state.rs +++ b/crates/fuel-core/src/state.rs @@ -1,8 +1,14 @@ -use crate::database::{ - Column, - Database, - Error as DatabaseError, - Result as DatabaseResult, +use crate::{ + database::{ + Column, + Database, + Error as DatabaseError, + Result as DatabaseResult, + }, + state::in_memory::{ + memory_store::MemoryStore, + transaction::MemoryTransactionView, + }, }; use fuel_core_storage::{ iter::{ @@ -16,7 +22,47 @@ use std::{ sync::Arc, }; -pub type DataSource = Arc>; +pub mod in_memory; +#[cfg(feature = "rocksdb")] +pub mod rocks_db; + +type DataSourceInner = Arc>; + +#[derive(Clone, Debug)] +pub struct DataSource(DataSourceInner); + +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +#[cfg(feature = "rocksdb")] +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +impl core::ops::Deref for DataSource { + type Target = DataSourceInner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl core::ops::DerefMut for DataSource { + fn 
deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} pub trait TransactableStorage: IteratorableStore + BatchOperations + Debug + Send + Sync @@ -29,7 +75,3 @@ pub trait TransactableStorage: fn flush(&self) -> DatabaseResult<()>; } - -pub mod in_memory; -#[cfg(feature = "rocksdb")] -pub mod rocks_db; diff --git a/crates/fuel-core/src/state/in_memory/transaction.rs b/crates/fuel-core/src/state/in_memory/transaction.rs index e249a3b5c7..7dcb96d827 100644 --- a/crates/fuel-core/src/state/in_memory/transaction.rs +++ b/crates/fuel-core/src/state/in_memory/transaction.rs @@ -50,11 +50,14 @@ pub struct MemoryTransactionView { } impl MemoryTransactionView { - pub fn new(source: DataSource) -> Self { + pub fn new(source: D) -> Self + where + D: Into, + { Self { view_layer: MemoryStore::default(), changes: Default::default(), - data_source: source, + data_source: source.into(), } } diff --git a/crates/services/consensus_module/poa/src/ports.rs b/crates/services/consensus_module/poa/src/ports.rs index fdb8a2d11d..c93180645b 100644 --- a/crates/services/consensus_module/poa/src/ports.rs +++ b/crates/services/consensus_module/poa/src/ports.rs @@ -66,10 +66,11 @@ pub trait BlockProducer: Send + Sync { } #[cfg_attr(test, mockall::automock(type Database=EmptyStorage;))] +#[async_trait::async_trait] pub trait BlockImporter: Send + Sync { type Database; - fn commit_result( + async fn commit_result( &self, result: UncommittedImportResult>, ) -> anyhow::Result<()>; diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 3ec7b8727d..4fd65a220e 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -356,10 +356,12 @@ where consensus: seal, }; // Import the sealed block - self.block_importer.commit_result(Uncommitted::new( - ImportResult::new_from_local(block, tx_status), - db_transaction, - ))?; + self.block_importer + 
.commit_result(Uncommitted::new( + ImportResult::new_from_local(block, tx_status), + db_transaction, + )) + .await?; // Update last block time self.last_height = height; diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 6be1e94498..a2041c56f4 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -14,7 +14,6 @@ use fuel_core_storage::{ ContractsLatestUtxo, Messages, ProcessedTransactions, - Receipts, SpentMessages, }, transactional::{ @@ -23,7 +22,6 @@ use fuel_core_storage::{ }, StorageAsMut, StorageAsRef, - StorageInspect, }; use fuel_core_types::{ blockchain::{ @@ -45,11 +43,9 @@ use fuel_core_types::{ fuel_tx::{ field::{ InputContract, - Inputs, MintAmount, MintAssetId, OutputContract, - Outputs, TxPointer as TxPointerField, }, input, @@ -79,7 +75,6 @@ use fuel_core_types::{ Transaction, TxId, TxPointer, - UniqueIdentifier, UtxoId, }, fuel_types::{ @@ -123,7 +118,6 @@ use fuel_core_types::{ TransactionValidityError, UncommittedResult, }, - txpool::TransactionStatus, }, }; use parking_lot::Mutex as ParkingMutex; @@ -267,11 +261,11 @@ where let ( ExecutionResult { - block, skipped_transactions, + tx_status, .. }, - temporary_db, + _temporary_db, ) = self .execute_without_commit(ExecutionTypes::DryRun(component), options)? .into(); @@ -281,19 +275,11 @@ where return Err(err) } - block - .transactions() - .iter() - .map(|tx| { - let id = tx.id(&self.config.consensus_parameters.chain_id); - StorageInspect::::get(temporary_db.as_ref(), &id) - .transpose() - .unwrap_or_else(|| Ok(Default::default())) - .map(|v| v.into_owned()) - }) - .collect::>, _>>() - .map_err(Into::into) - // drop `temporary_db` without committing to avoid altering state. + Ok(tx_status + .into_iter() + .map(|tx| tx.receipts) + .collect::>>()) + // drop `_temporary_db` without committing to avoid altering state. 
} } @@ -447,16 +433,6 @@ where tx_status, }; - // ------------ GraphQL API Functionality BEGIN ------------ - - // save the status for every transaction using the finalized block id - self.persist_transaction_status(&result, block_st_transaction.as_mut())?; - - // save the associated owner for each transaction in the block - self.index_tx_owners_for_block(&result.block, block_st_transaction.as_mut())?; - - // ------------ GraphQL API Functionality END ------------ - // Get the complete fuel block. Ok(UncommittedResult::new(result, block_st_transaction)) } @@ -807,6 +783,7 @@ where execution_data.tx_status.push(TransactionExecutionStatus { id: coinbase_id, result: TransactionExecutionResult::Success { result: None }, + receipts: vec![], }); if block_st_transaction @@ -895,7 +872,10 @@ where debug_assert_eq!(tx.id(&self.config.consensus_parameters.chain_id), tx_id); } - // Wrap inputs in the execution kind. + // TODO: We need to call this function before `vm.transact` but we can't do that because of + // `Checked` immutability requirements. So we do it here after its execution for now. + // But it should be fixed in the future. + // https://github.com/FuelLabs/fuel-vm/issues/651 self.compute_inputs( match execution_kind { ExecutionKind::DryRun => ExecutionTypes::DryRun(tx.inputs_mut()), @@ -970,9 +950,6 @@ where .storage::() .insert(&tx_id, &())?; - // persist receipts - self.persist_receipts(&tx_id, &receipts, tx_st_transaction.as_mut())?; - let status = if reverted { self.log_backtrace(&vm, &receipts); // get reason for revert @@ -1004,14 +981,15 @@ where .checked_add(tx_fee) .ok_or(ExecutorError::FeeOverflow)?; execution_data.used_gas = execution_data.used_gas.saturating_add(used_gas); + execution_data + .message_ids + .extend(receipts.iter().filter_map(|r| r.message_id())); // queue up status for this tx to be stored once block id is finalized. 
execution_data.tx_status.push(TransactionExecutionStatus { id: tx_id, result: status, + receipts, }); - execution_data - .message_ids - .extend(receipts.iter().filter_map(|r| r.message_id())); Ok(final_tx) } @@ -1070,7 +1048,7 @@ where | Input::MessageDataSigned(MessageDataSigned { nonce, .. }) | Input::MessageDataPredicate(MessageDataPredicate { nonce, .. }) => { // Eagerly return already spent if status is known. - if db.message_is_spent(nonce)? { + if db.storage::().contains_key(nonce)? { return Err( TransactionValidityError::MessageAlreadySpent(*nonce).into() ) @@ -1545,130 +1523,6 @@ where Ok(()) } - - fn persist_receipts( - &self, - tx_id: &TxId, - receipts: &[Receipt], - db: &mut D, - ) -> ExecutorResult<()> { - if db.storage::().insert(tx_id, receipts)?.is_some() { - return Err(ExecutorError::OutputAlreadyExists) - } - Ok(()) - } - - /// Associate all transactions within a block to their respective UTXO owners - fn index_tx_owners_for_block( - &self, - block: &Block, - block_st_transaction: &mut D, - ) -> ExecutorResult<()> { - for (tx_idx, tx) in block.transactions().iter().enumerate() { - let block_height = *block.header().height(); - let inputs; - let outputs; - let tx_idx = - u16::try_from(tx_idx).map_err(|_| ExecutorError::TooManyTransactions)?; - let tx_id = tx.id(&self.config.consensus_parameters.chain_id); - match tx { - Transaction::Script(tx) => { - inputs = tx.inputs().as_slice(); - outputs = tx.outputs().as_slice(); - } - Transaction::Create(tx) => { - inputs = tx.inputs().as_slice(); - outputs = tx.outputs().as_slice(); - } - Transaction::Mint(_) => continue, - } - self.persist_owners_index( - block_height, - inputs, - outputs, - &tx_id, - tx_idx, - block_st_transaction, - )?; - } - Ok(()) - } - - /// Index the tx id by owner for all of the inputs and outputs - fn persist_owners_index( - &self, - block_height: BlockHeight, - inputs: &[Input], - outputs: &[Output], - tx_id: &Bytes32, - tx_idx: u16, - db: &mut D, - ) -> ExecutorResult<()> { - let 
mut owners = vec![]; - for input in inputs { - if let Input::CoinSigned(CoinSigned { owner, .. }) - | Input::CoinPredicate(CoinPredicate { owner, .. }) = input - { - owners.push(owner); - } - } - - for output in outputs { - match output { - Output::Coin { to, .. } - | Output::Change { to, .. } - | Output::Variable { to, .. } => { - owners.push(to); - } - Output::Contract(_) | Output::ContractCreated { .. } => {} - } - } - - // dedupe owners from inputs and outputs prior to indexing - owners.sort(); - owners.dedup(); - - for owner in owners { - db.record_tx_id_owner(owner, block_height, tx_idx, tx_id)?; - } - - Ok(()) - } - - fn persist_transaction_status( - &self, - result: &ExecutionResult, - db: &D, - ) -> ExecutorResult<()> { - let time = result.block.header().time(); - let block_id = result.block.id(); - for TransactionExecutionStatus { id, result } in result.tx_status.iter() { - match result { - TransactionExecutionResult::Success { result } => { - db.update_tx_status( - id, - TransactionStatus::Success { - block_id, - time, - result: *result, - }, - )?; - } - TransactionExecutionResult::Failed { result, reason } => { - db.update_tx_status( - id, - TransactionStatus::Failed { - block_id, - time, - result: *result, - reason: reason.clone(), - }, - )?; - } - } - } - Ok(()) - } } trait Fee { diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index 1ca5a5058f..e9c5b1b9b4 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -8,14 +8,12 @@ use fuel_core_storage::{ ContractsState, Messages, ProcessedTransactions, - Receipts, SpentMessages, }, transactional::Transactional, vm_storage::VmStorageRequirements, Error as StorageError, MerkleRootStorage, - StorageInspect, StorageMutate, StorageRead, }; @@ -25,18 +23,14 @@ use fuel_core_types::{ entities::message::Message, fuel_tx, fuel_tx::{ - Address, - Bytes32, TxId, UniqueIdentifier, }, fuel_types::{ - BlockHeight, ChainId, Nonce, }, 
fuel_vm::checked_transaction::CheckedTransaction, - services::txpool::TransactionStatus, }; use fuel_core_types::fuel_tx::ContractId; @@ -79,50 +73,20 @@ pub trait RelayerPort { ) -> anyhow::Result>; } -pub trait MessageIsSpent: - StorageInspect - + StorageInspect -{ - type Error; - - fn message_is_spent(&self, nonce: &Nonce) -> Result; -} - -pub trait TxIdOwnerRecorder { - type Error; - - fn record_tx_id_owner( - &self, - owner: &Address, - block_height: BlockHeight, - tx_idx: u16, - tx_id: &Bytes32, - ) -> Result, Self::Error>; - - fn update_tx_status( - &self, - id: &Bytes32, - status: TransactionStatus, - ) -> Result, Self::Error>; -} - // TODO: Remove `Clone` bound pub trait ExecutorDatabaseTrait: - StorageMutate + StorageMutate + StorageMutate + MerkleRootStorage - + MessageIsSpent + StorageMutate + StorageMutate + StorageMutate - + StorageMutate + StorageMutate - + StorageRead + + StorageRead + StorageMutate + MerkleRootStorage + VmStorageRequirements + Transactional - + TxIdOwnerRecorder + Clone { } diff --git a/crates/services/importer/Cargo.toml b/crates/services/importer/Cargo.toml index 7cd9384042..6b47a8272f 100644 --- a/crates/services/importer/Cargo.toml +++ b/crates/services/importer/Cargo.toml @@ -17,6 +17,7 @@ fuel-core-metrics = { workspace = true } fuel-core-storage = { workspace = true } fuel-core-types = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-rayon = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/crates/services/importer/src/config.rs b/crates/services/importer/src/config.rs index c551127c68..0e9d938be9 100644 --- a/crates/services/importer/src/config.rs +++ b/crates/services/importer/src/config.rs @@ -22,7 +22,7 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - max_block_notify_buffer: 1 << 10, + max_block_notify_buffer: 1, metrics: false, chain_id: ChainId::default(), } diff --git a/crates/services/importer/src/importer.rs 
b/crates/services/importer/src/importer.rs index 82a9898de9..60cc3c1f09 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -29,6 +29,7 @@ use fuel_core_types::{ services::{ block_importer::{ ImportResult, + SharedImportResult, UncommittedResult, }, executor, @@ -38,7 +39,10 @@ use fuel_core_types::{ }; use std::{ ops::Deref, - sync::Arc, + sync::{ + Arc, + Mutex, + }, time::{ Instant, SystemTime, @@ -47,6 +51,7 @@ use std::{ }; use tokio::sync::{ broadcast, + oneshot, TryAcquireError, }; @@ -106,10 +111,14 @@ impl PartialEq for Error { pub struct Importer { database: D, - executor: E, - verifier: V, + executor: Arc, + verifier: Arc, chain_id: ChainId, - broadcast: broadcast::Sender>, + broadcast: broadcast::Sender, + /// The channel to notify about the end of the processing of the previous block by all listeners. + /// It is used to await until all receivers of the notification process the `SharedImportResult` + /// before starting committing a new block. + prev_block_process_result: Mutex>>, guard: tokio::sync::Semaphore, } @@ -119,15 +128,16 @@ impl Importer { Self { database, - executor, - verifier, + executor: Arc::new(executor), + verifier: Arc::new(verifier), chain_id: config.chain_id, broadcast, + prev_block_process_result: Default::default(), guard: tokio::sync::Semaphore::new(1), } } - pub fn subscribe(&self) -> broadcast::Receiver> { + pub fn subscribe(&self) -> broadcast::Receiver { self.broadcast.subscribe() } @@ -163,7 +173,7 @@ where /// /// Only one commit may be in progress at the time. All other calls will fail. /// Returns an error if called while another call is in progress. - pub fn commit_result( + pub async fn commit_result( &self, result: UncommittedResult>, ) -> Result<(), Error> @@ -171,9 +181,36 @@ where ExecutorDatabase: ports::ExecutorDatabase, { let _guard = self.lock()?; + // It is safe to unwrap the channel because we have the `_guard`. 
+ let previous_block_result = self + .prev_block_process_result + .lock() + .expect("poisoned") + .take(); + + // Await until all receivers of the notification process the result. + if let Some(channel) = previous_block_result { + let _ = channel.await; + } + self._commit_result(result) } + /// The method works in the same way as [`Importer::commit_result`], but it doesn't + /// wait for listeners to process the result. + pub fn commit_result_without_awaiting_listeners( + &self, + result: UncommittedResult>, + ) -> Result<(), Error> + where + ExecutorDatabase: ports::ExecutorDatabase, + { + let _guard = self.lock()?; + self._commit_result(result)?; + Ok(()) + } + + /// The method commits the result of the block execution and notifies about a new imported block. #[tracing::instrument( skip_all, fields( @@ -277,7 +314,13 @@ where .set(current_time); tracing::info!("Committed block {:#x}", result.sealed_block.entity.id()); - let _ = self.broadcast.send(Arc::new(result)); + + // The `tokio::sync::oneshot::Sender` is used to notify about the end + // of the processing of a new block by all listeners. 
+ let (sender, receiver) = oneshot::channel(); + let _ = self.broadcast.send(Arc::new(Awaiter::new(result, sender))); + *self.prev_block_process_result.lock().expect("poisoned") = Some(receiver); + Ok(()) } @@ -331,13 +374,24 @@ where pub fn verify_and_execute_block( &self, sealed_block: SealedBlock, + ) -> Result>, Error> { + Self::verify_and_execute_block_inner( + self.executor.clone(), + self.verifier.clone(), + sealed_block, + ) + } + + fn verify_and_execute_block_inner( + executor: Arc, + verifier: Arc, + sealed_block: SealedBlock, ) -> Result>, Error> { let consensus = sealed_block.consensus; let block = sealed_block.entity; let sealed_block_id = block.id(); - let result_of_verification = - self.verifier.verify_block_fields(&consensus, &block); + let result_of_verification = verifier.verify_block_fields(&consensus, &block); if let Err(err) = result_of_verification { return Err(Error::FailedVerification(err)) } @@ -357,8 +411,7 @@ where tx_status, }, db_tx, - ) = self - .executor + ) = executor .execute_without_commit(block) .map_err(Error::FailedExecution)? .into(); @@ -387,19 +440,47 @@ where impl Importer where - IDatabase: ImporterDatabase, - E: Executor, - V: BlockVerifier, + IDatabase: ImporterDatabase + 'static, + E: Executor + 'static, + V: BlockVerifier + 'static, { /// The method validates the `Block` fields and commits the `SealedBlock`. /// It is a combination of the [`Importer::verify_and_execute_block`] and [`Importer::commit_result`]. 
- pub fn execute_and_commit(&self, sealed_block: SealedBlock) -> Result<(), Error> { + pub async fn execute_and_commit( + &self, + sealed_block: SealedBlock, + ) -> Result<(), Error> { let _guard = self.lock()?; + + let executor = self.executor.clone(); + let verifier = self.verifier.clone(); + let (result, execute_time) = tokio_rayon::spawn_fifo(|| { + let start = Instant::now(); + let result = + Self::verify_and_execute_block_inner(executor, verifier, sealed_block); + let execute_time = start.elapsed().as_secs_f64(); + (result, execute_time) + }) + .await; + + let result = result?; + + // It is safe to unwrap the channel because we have the `_guard`. + let previous_block_result = self + .prev_block_process_result + .lock() + .expect("poisoned") + .take(); + + // Await until all receivers of the notification process the result. + if let Some(channel) = previous_block_result { + let _ = channel.await; + } + let start = Instant::now(); - let result = self.verify_and_execute_block(sealed_block)?; let commit_result = self._commit_result(result); - // record the execution time to prometheus - let time = start.elapsed().as_secs_f64(); + let commit_time = start.elapsed().as_secs_f64(); + let time = execute_time + commit_time; importer_metrics().execute_and_commit_duration.observe(time); // return execution result commit_result @@ -419,3 +500,34 @@ impl ShouldBeUnique for Option { } } } + +/// The wrapper around `ImportResult` to notify about the end of the processing of a new block. 
+struct Awaiter { + result: ImportResult, + release_channel: Option>, +} + +impl Drop for Awaiter { + fn drop(&mut self) { + if let Some(release_channel) = core::mem::take(&mut self.release_channel) { + let _ = release_channel.send(()); + } + } +} + +impl Deref for Awaiter { + type Target = ImportResult; + + fn deref(&self) -> &Self::Target { + &self.result + } +} + +impl Awaiter { + fn new(result: ImportResult, channel: oneshot::Sender<()>) -> Self { + Self { + result, + release_channel: Some(channel), + } + } +} diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index 897be9f994..717271093f 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -261,12 +261,13 @@ where => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when block exists for height 0" )] -fn commit_result_genesis( +#[tokio::test] +async fn commit_result_genesis( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, executor_db: impl Fn() -> MockDatabase, ) -> Result<(), Error> { - commit_result_assert(sealed_block, underlying_db(), executor_db()) + commit_result_assert(sealed_block, underlying_db(), executor_db()).await } //////////////////////////// PoA Block //////////////////////////// @@ -333,7 +334,8 @@ fn commit_result_genesis( => Err(storage_failure_error()); "fails to import block when executor db fails to find block" )] -fn commit_result_and_execute_and_commit_poa( +#[tokio::test] +async fn commit_result_and_execute_and_commit_poa( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, executor_db: impl Fn() -> MockDatabase, @@ -342,18 +344,19 @@ fn commit_result_and_execute_and_commit_poa( // validation rules(-> test cases) during committing the result. 
let height = *sealed_block.entity.header().height(); let commit_result = - commit_result_assert(sealed_block.clone(), underlying_db(), executor_db()); + commit_result_assert(sealed_block.clone(), underlying_db(), executor_db()).await; let execute_and_commit_result = execute_and_commit_assert( sealed_block, underlying_db(), executor(ok(ex_result(height.into(), 0)), executor_db()), verifier(ok(())), - ); + ) + .await; assert_eq!(commit_result, execute_and_commit_result); commit_result } -fn commit_result_assert( +async fn commit_result_assert( sealed_block: SealedBlock, underlying_db: MockDatabase, executor_db: MockDatabase, @@ -366,23 +369,22 @@ fn commit_result_assert( ); let mut imported_blocks = importer.subscribe(); - let result = importer.commit_result(uncommitted_result); + let result = importer.commit_result(uncommitted_result).await; if result.is_ok() { let actual_sealed_block = imported_blocks.try_recv().unwrap(); assert_eq!(actual_sealed_block.sealed_block, expected_to_broadcast); - assert_eq!( - imported_blocks - .try_recv() - .expect_err("We should broadcast only one block"), - TryRecvError::Empty - ) + if let Err(err) = imported_blocks.try_recv() { + assert_eq!(err, TryRecvError::Empty); + } else { + panic!("We should broadcast only one block"); + } } result } -fn execute_and_commit_assert( +async fn execute_and_commit_assert( sealed_block: SealedBlock, underlying_db: MockDatabase, executor: MockExecutor, @@ -392,24 +394,24 @@ fn execute_and_commit_assert( let importer = Importer::new(Default::default(), underlying_db, executor, verifier); let mut imported_blocks = importer.subscribe(); - let result = importer.execute_and_commit(sealed_block); + let result = importer.execute_and_commit(sealed_block).await; if result.is_ok() { let actual_sealed_block = imported_blocks.try_recv().unwrap(); assert_eq!(actual_sealed_block.sealed_block, expected_to_broadcast); - assert_eq!( - imported_blocks - .try_recv() - .expect_err("We should broadcast only one block"), 
- TryRecvError::Empty - ) + + if let Err(err) = imported_blocks.try_recv() { + assert_eq!(err, TryRecvError::Empty); + } else { + panic!("We should broadcast only one block"); + } } result } -#[test] -fn commit_result_fail_when_locked() { +#[tokio::test] +async fn commit_result_fail_when_locked() { let importer = Importer::new(Default::default(), MockDatabase::default(), (), ()); let uncommitted_result = UncommittedResult::new( ImportResult::default(), @@ -418,13 +420,13 @@ fn commit_result_fail_when_locked() { let _guard = importer.lock(); assert_eq!( - importer.commit_result(uncommitted_result), + importer.commit_result(uncommitted_result).await, Err(Error::SemaphoreError(TryAcquireError::NoPermits)) ); } -#[test] -fn execute_and_commit_fail_when_locked() { +#[tokio::test] +async fn execute_and_commit_fail_when_locked() { let importer = Importer::new( Default::default(), MockDatabase::default(), @@ -434,7 +436,7 @@ fn execute_and_commit_fail_when_locked() { let _guard = importer.lock(); assert_eq!( - importer.execute_and_commit(Default::default()), + importer.execute_and_commit(Default::default()).await, Err(Error::SemaphoreError(TryAcquireError::NoPermits)) ); } @@ -491,7 +493,8 @@ fn one_lock_at_the_same_time() { => Err(verification_failure_error()); "commit fails if verification fails" )] -fn execute_and_commit_and_verify_and_execute_block_poa( +#[tokio::test] +async fn execute_and_commit_and_verify_and_execute_block_poa( sealed_block: SealedBlock, block_after_execution: P, verifier_result: V, @@ -521,7 +524,8 @@ where executor_db(ok(Some(previous_height)), ok(true), commits)(), ), verifier(verifier_result), - ); + ) + .await; assert_eq!(verify_and_execute_result, execute_and_commit_result); execute_and_commit_result } diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index 51c14e5085..99f097fefe 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -33,7 +33,7 @@ pub trait 
Executor: Send + Sync { } /// The database port used by the block importer. -pub trait ImporterDatabase { +pub trait ImporterDatabase: Send + Sync { /// Returns the latest block height. fn latest_block_height(&self) -> StorageResult>; /// Update metadata about the total number of transactions on the chain. @@ -57,7 +57,7 @@ pub trait ExecutorDatabase: ImporterDatabase { #[cfg_attr(test, mockall::automock)] /// The verifier of the block. -pub trait BlockVerifier { +pub trait BlockVerifier: Send + Sync { /// Verifies the consistency of the block fields for the block's height. /// It includes the verification of **all** fields, it includes the consensus rules for /// the corresponding height. diff --git a/crates/services/p2p/src/config.rs b/crates/services/p2p/src/config.rs index 6f5f6198b5..242e208abe 100644 --- a/crates/services/p2p/src/config.rs +++ b/crates/services/p2p/src/config.rs @@ -74,7 +74,7 @@ pub struct Config { /// Name of the Network pub network_name: String, - /// Checksum is a hash(sha256) of [`Genesis`](fuel_core_types::blockchain::consensus::Genesis) - chain id. + /// Checksum is a hash(sha256) of [`Genesis`] - chain id. 
pub checksum: Checksum, /// IP address for Swarm to listen on diff --git a/crates/services/p2p/src/gossipsub/messages.rs b/crates/services/p2p/src/gossipsub/messages.rs index f47e40b626..0707068599 100644 --- a/crates/services/p2p/src/gossipsub/messages.rs +++ b/crates/services/p2p/src/gossipsub/messages.rs @@ -14,7 +14,7 @@ pub enum GossipTopicTag { NewTx, } -/// Takes Arc and wraps it in a matching GossipsubBroadcastRequest +/// Takes `Arc` and wraps it in a matching GossipsubBroadcastRequest /// The inner referenced value is serialized and broadcast to the network /// It is deserialized as `GossipsubMessage` #[derive(Debug, Clone)] diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 0bc9ab8c9f..afb2c9cf8e 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -106,7 +106,7 @@ pub struct FuelP2PService { /// It will send it to the specified Peer via its unique ResponseChannel inbound_requests_table: HashMap>, - /// NetworkCodec used as for encoding and decoding of Gossipsub messages + /// NetworkCodec used as `` for encoding and decoding of Gossipsub messages network_codec: PostcardCodec, /// Stores additional p2p network info diff --git a/crates/services/relayer/Cargo.toml b/crates/services/relayer/Cargo.toml index 2f2be488b1..0d9ea134ab 100644 --- a/crates/services/relayer/Cargo.toml +++ b/crates/services/relayer/Cargo.toml @@ -40,6 +40,7 @@ fuel-core-services = { path = "../../services", features = ["test-helpers"] } fuel-core-storage = { path = "../../storage", features = ["test-helpers"] } fuel-core-trace = { path = "../../trace" } mockall = { workspace = true } +rand = { workspace = true } test-case = { workspace = true } tokio = { workspace = true, features = ["macros", "test-util"] } diff --git a/crates/services/relayer/src/ports.rs b/crates/services/relayer/src/ports.rs index 725c231d6d..2dbd210678 100644 --- a/crates/services/relayer/src/ports.rs +++ 
b/crates/services/relayer/src/ports.rs @@ -2,6 +2,13 @@ use async_trait::async_trait; use fuel_core_storage::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + primitive::Primitive, + }, + column::Column, + structured_storage::TableWithBlueprint, tables::Messages, transactional::Transactional, Error as StorageError, @@ -138,3 +145,18 @@ impl Mappable for RelayerMetadata { /// If the relayer metadata ever contains more than one key, this should be /// changed from a unit value. const METADATA_KEY: () = (); + +impl TableWithBlueprint for RelayerMetadata { + type Blueprint = Plain>; + + fn column() -> Column { + Column::RelayerMetadata + } +} + +#[cfg(test)] +fuel_core_storage::basic_storage_tests!( + RelayerMetadata, + ::Key::default(), + ::Value::default() +); diff --git a/crates/services/relayer/src/service.rs b/crates/services/relayer/src/service.rs index 7e6521754b..dea4877042 100644 --- a/crates/services/relayer/src/service.rs +++ b/crates/services/relayer/src/service.rs @@ -249,7 +249,7 @@ where } impl SharedState { - /// Wait for the [`Task`] to be in sync with + /// Wait for the `Task` to be in sync with /// the data availability layer. /// /// Yields until the relayer reaches a point where it diff --git a/crates/services/src/service.rs b/crates/services/src/service.rs index ef084e0496..14cb155fa4 100644 --- a/crates/services/src/service.rs +++ b/crates/services/src/service.rs @@ -14,7 +14,7 @@ use futures::FutureExt; use tokio::sync::watch; use tracing::Instrument; -/// Alias for Arc +/// Alias for `Arc` pub type Shared = std::sync::Arc; /// A mutex that can safely be in async contexts and avoids deadlocks. 
diff --git a/crates/services/txpool/src/mock_db.rs b/crates/services/txpool/src/mock_db.rs index 157e5e7f27..5435585a3f 100644 --- a/crates/services/txpool/src/mock_db.rs +++ b/crates/services/txpool/src/mock_db.rs @@ -95,11 +95,4 @@ impl TxPoolDb for MockDb { fn current_block_height(&self) -> StorageResult { Ok(Default::default()) } - - fn transaction_status( - &self, - _tx_id: &fuel_core_types::fuel_types::Bytes32, - ) -> StorageResult { - unimplemented!() - } } diff --git a/crates/services/txpool/src/ports.rs b/crates/services/txpool/src/ports.rs index de51f429e9..375d706698 100644 --- a/crates/services/txpool/src/ports.rs +++ b/crates/services/txpool/src/ports.rs @@ -11,18 +11,16 @@ use fuel_core_types::{ }, fuel_types::{ BlockHeight, - Bytes32, ContractId, Nonce, }, services::{ - block_importer::ImportResult, + block_importer::SharedImportResult, p2p::{ GossipsubMessageAcceptance, GossipsubMessageInfo, NetworkData, }, - txpool::TransactionStatus, }, }; use std::sync::Arc; @@ -46,7 +44,7 @@ pub trait PeerToPeer: Send + Sync { pub trait BlockImporter: Send + Sync { /// Wait until the next block is available - fn block_events(&self) -> BoxStream>; + fn block_events(&self) -> BoxStream; } pub trait TxPoolDb: Send + Sync { @@ -59,6 +57,4 @@ pub trait TxPoolDb: Send + Sync { fn is_message_spent(&self, message_id: &Nonce) -> StorageResult; fn current_block_height(&self) -> StorageResult; - - fn transaction_status(&self, tx_id: &Bytes32) -> StorageResult; } diff --git a/crates/services/txpool/src/service.rs b/crates/services/txpool/src/service.rs index e247e196a7..38ac9b7592 100644 --- a/crates/services/txpool/src/service.rs +++ b/crates/services/txpool/src/service.rs @@ -34,7 +34,6 @@ use fuel_core_types::{ Bytes32, }, services::{ - block_importer::ImportResult, p2p::{ GossipData, GossipsubMessageAcceptance, @@ -52,6 +51,7 @@ use fuel_core_types::{ }; use anyhow::anyhow; +use fuel_core_types::services::block_importer::SharedImportResult; use parking_lot::Mutex as 
ParkingMutex; use std::{ sync::Arc, @@ -143,7 +143,7 @@ impl Clone for SharedState { pub struct Task { gossiped_tx_stream: BoxStream, - committed_block_stream: BoxStream>, + committed_block_stream: BoxStream, shared: SharedState, ttl_timer: tokio::time::Interval, } @@ -201,14 +201,13 @@ where result = self.committed_block_stream.next() => { if let Some(result) = result { - let block = result + let block = &result .sealed_block - .entity - .compress(&self.shared.consensus_params.chain_id); + .entity; self.shared.txpool.lock().block_update( &self.shared.tx_status_sender, - block.header().height(), - block.transactions() + block, + &result.tx_status, ); should_continue = true; } else { diff --git a/crates/services/txpool/src/service/test_helpers.rs b/crates/services/txpool/src/service/test_helpers.rs index decaf2f98d..3cf532bfa8 100644 --- a/crates/services/txpool/src/service/test_helpers.rs +++ b/crates/services/txpool/src/service/test_helpers.rs @@ -21,7 +21,10 @@ use fuel_core_types::{ TransactionBuilder, Word, }, - services::p2p::GossipsubMessageAcceptance, + services::{ + block_importer::ImportResult, + p2p::GossipsubMessageAcceptance, + }, }; use std::cell::RefCell; @@ -103,7 +106,7 @@ mockall::mock! 
{ pub Importer {} impl BlockImporter for Importer { - fn block_events(&self) -> BoxStream>; + fn block_events(&self) -> BoxStream; } } @@ -115,7 +118,7 @@ impl MockImporter { let stream = fuel_core_services::stream::unfold(blocks, |mut blocks| async { let block = blocks.pop(); if let Some(sealed_block) = block { - let result = + let result: SharedImportResult = Arc::new(ImportResult::new_from_local(sealed_block, vec![])); Some((result, blocks)) diff --git a/crates/services/txpool/src/txpool.rs b/crates/services/txpool/src/txpool.rs index 50c7d2484e..1c3c0376e8 100644 --- a/crates/services/txpool/src/txpool.rs +++ b/crates/services/txpool/src/txpool.rs @@ -35,8 +35,16 @@ use fuel_core_types::{ tai64::Tai64, }; +use crate::service::TxStatusMessage; use fuel_core_metrics::txpool_metrics::txpool_metrics; -use fuel_core_types::fuel_vm::checked_transaction::CheckPredicateParams; +use fuel_core_types::{ + blockchain::block::Block, + fuel_vm::checked_transaction::CheckPredicateParams, + services::{ + executor::TransactionExecutionStatus, + txpool::from_executor_to_status, + }, +}; use std::{ cmp::Reverse, collections::HashMap, @@ -315,14 +323,19 @@ where pub fn block_update( &mut self, tx_status_sender: &TxStatusChange, - height: &BlockHeight, - transactions: &[TxId], + block: &Block, + tx_status: &[TransactionExecutionStatus], // spend_outputs: [Input], added_outputs: [AddedOutputs] ) { - for tx_id in transactions { - let tx_id = *tx_id; - let result = self.database.transaction_status(&tx_id); - tx_status_sender.send_complete(tx_id, height, result); + let height = block.header().height(); + for status in tx_status { + let tx_id = status.id; + let status = from_executor_to_status(block, status.result.clone()); + tx_status_sender.send_complete( + tx_id, + height, + TxStatusMessage::Status(status), + ); self.remove_committed_tx(&tx_id); } } diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 70f9a1c5d2..7380c55990 100644 --- a/crates/storage/Cargo.toml 
+++ b/crates/storage/Cargo.toml @@ -19,10 +19,23 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } derive_more = { workspace = true } -fuel-core-types = { workspace = true, default-features = false } +enum-iterator = { workspace = true } +fuel-core-types = { workspace = true, default-features = false, features = ["serde"] } fuel-vm-private = { workspace = true, default-features = false } +impl-tools = "0.10" +itertools = { workspace = true } mockall = { workspace = true, optional = true } +paste = "1" +postcard = { workspace = true, features = ["alloc"] } primitive-types = { workspace = true, default-features = false } +rand = { workspace = true, optional = true } +serde = { workspace = true } +strum = { workspace = true } +strum_macros = { workspace = true } + +[dev-dependencies] +fuel-core-storage = { path = ".", features = ["test-helpers"] } +fuel-core-types = { workspace = true, default-features = false, features = ["serde", "random", "test-helpers"] } [features] -test-helpers = ["dep:mockall"] +test-helpers = ["dep:mockall", "dep:rand"] diff --git a/crates/storage/src/blueprint.rs b/crates/storage/src/blueprint.rs new file mode 100644 index 0000000000..3db1ee73e8 --- /dev/null +++ b/crates/storage/src/blueprint.rs @@ -0,0 +1,139 @@ +//! The module defines structures for the [`Mappable`] tables. +//! Each table may have its blueprint that defines how it works with the storage. +//! The table may have a plain blueprint that simply works in CRUD mode, or it may be an SMT-based +//! blueprint that maintains a valid Merkle tree over the storage entries. + +use crate::{ + codec::{ + Decode, + Encode, + Encoder, + }, + kv_store::{ + BatchOperations, + KeyValueStore, + }, + Mappable, + Result as StorageResult, +}; + +pub mod plain; +pub mod sparse; + +/// This trait allows defining the agnostic implementation for all storage +/// traits(`StorageInspect,` `StorageMutate,` etc) while the main logic is +/// hidden inside the blueprint. 
It allows quickly adding support for new +/// structures only by implementing the trait and reusing the existing +/// infrastructure in other places. It allows changing the blueprint on the +/// fly in the definition of the table without affecting other areas of the codebase. +/// +/// The blueprint is responsible for encoding/decoding(usually it is done via `KeyCodec` and `ValueCodec`) +/// the key and value and putting/extracting it to/from the storage. +pub trait Blueprint +where + M: Mappable, + S: KeyValueStore, +{ + /// The codec used to encode and decode storage key. + type KeyCodec: Encode + Decode; + /// The codec used to encode and decode storage value. + type ValueCodec: Encode + Decode; + + /// Puts the key-value pair into the storage. + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()>; + + /// Puts the key-value pair into the storage and returns the old value. + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult>; + + /// Takes the value from the storage and returns it. + /// The value is removed from the storage. + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult>; + + /// Removes the value from the storage. + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()>; + + /// Checks if the value exists in the storage. + fn exists(storage: &S, key: &M::Key, column: S::Column) -> StorageResult { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.exists(key_bytes.as_ref(), column) + } + + /// Returns the size of the value in the storage. + fn size_of_value( + storage: &S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.size_of_value(key_bytes.as_ref(), column) + } + + /// Returns the value from the storage. 
+ fn get( + storage: &S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage + .get(key_bytes.as_ref(), column)? + .map(|value| { + Self::ValueCodec::decode_from_value(value).map_err(crate::Error::Codec) + }) + .transpose() + } +} + +/// It is an extension of the blueprint that allows supporting batch operations. +/// Usually, they are more performant than initializing/inserting/removing values one by one. +pub trait SupportsBatching: Blueprint +where + M: Mappable, + S: BatchOperations, +{ + /// Initializes the storage with a bunch of key-value pairs. + /// In some cases, this method may be more performant than [`Self::insert`]. + fn init<'a, Iter>(storage: &mut S, column: S::Column, set: Iter) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a; + + /// Inserts the batch of key-value pairs into the storage. + fn insert<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a; + + /// Removes the batch of key-value pairs from the storage. + fn remove<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a; +} diff --git a/crates/storage/src/blueprint/plain.rs b/crates/storage/src/blueprint/plain.rs new file mode 100644 index 0000000000..7a9e696e81 --- /dev/null +++ b/crates/storage/src/blueprint/plain.rs @@ -0,0 +1,144 @@ +//! This module implements the plain blueprint for the storage. +//! The plain blueprint is the simplest one. It doesn't maintain any additional data structures +//! and doesn't provide any additional functionality. It is just a key-value store that encodes/decodes +//! the key and value and puts/takes them into/from the storage. 
+ +use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, + codec::{ + Decode, + Encode, + Encoder, + }, + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + WriteOperation, + }, + structured_storage::TableWithBlueprint, + Error as StorageError, + Mappable, + Result as StorageResult, +}; + +/// The type that represents the plain blueprint. +/// The `KeyCodec` and `ValueCodec` are used to encode/decode the key and value. +pub struct Plain { + _marker: core::marker::PhantomData<(KeyCodec, ValueCodec)>, +} + +impl Blueprint for Plain +where + M: Mappable, + S: KeyValueStore, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; + + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage.put(key_bytes.as_ref(), column, value) + } + + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage + .replace(key_bytes.as_ref(), column, value)? + .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose() + } + + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage + .take(key_bytes.as_ref(), column)? 
+ .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose() + } + + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.delete(key_bytes.as_ref(), column) + } +} + +impl SupportsBatching for Plain +where + S: BatchOperations, + M: Mappable + TableWithBlueprint>, + M::Blueprint: Blueprint, +{ + fn init<'a, Iter>(storage: &mut S, column: S::Column, set: Iter) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + Self::insert(storage, column, set) + } + + fn insert<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + storage.batch_write(&mut set.map(|(key, value)| { + let key_encoder = >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + let value = + >::ValueCodec::encode_as_value(value); + (key_bytes, column, WriteOperation::Insert(value)) + })) + } + + fn remove<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + { + storage.batch_write(&mut set.map(|key| { + let key_encoder = >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + (key_bytes, column, WriteOperation::Remove) + })) + } +} diff --git a/crates/storage/src/blueprint/sparse.rs b/crates/storage/src/blueprint/sparse.rs new file mode 100644 index 0000000000..3607bdd752 --- /dev/null +++ b/crates/storage/src/blueprint/sparse.rs @@ -0,0 +1,472 @@ +//! The module defines the `Sparse` blueprint for the storage. +//! The `Sparse` blueprint implements the sparse merkle tree on top of the storage. +//! It is like a [`Plain`](super::plain::Plain) blueprint that builds the sparse +//! merkle tree parallel to the normal storage and maintains it. 
+ +use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, + codec::{ + Decode, + Encode, + Encoder, + }, + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + StorageColumn, + WriteOperation, + }, + structured_storage::{ + StructuredStorage, + TableWithBlueprint, + }, + tables::merkle::SparseMerkleMetadata, + Error as StorageError, + Mappable, + MerkleRoot, + MerkleRootStorage, + Result as StorageResult, + StorageAsMut, + StorageInspect, + StorageMutate, +}; +use fuel_core_types::fuel_merkle::{ + sparse, + sparse::{ + in_memory, + MerkleTree, + MerkleTreeKey, + }, +}; +use itertools::Itertools; +use std::borrow::Cow; + +/// The trait that allows to convert the key of the table into the key of the metadata table. +/// If the key comprises several entities, it is possible to build a Merkle tree over different primary keys. +/// The trait defines the key over which to build an SMT. +pub trait PrimaryKey { + /// The storage key of the table. + type InputKey: ?Sized; + /// The extracted primary key. + type OutputKey: ?Sized; + + /// Converts the key of the table into the primary key of the metadata table. + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey; +} + +/// The `Sparse` blueprint builds the storage as a [`Plain`](super::plain::Plain) +/// blueprint and maintains the sparse merkle tree by the `Metadata` and `Nodes` tables. +/// +/// It uses the `KeyCodec` and `ValueCodec` to encode/decode the key and value in the +/// same way as a plain blueprint. +/// +/// The `Metadata` table stores the metadata of the tree(like a root of the tree), +/// and the `Nodes` table stores the tree's nodes. The SMT is built over the encoded +/// keys and values using the same encoding as for main key-value pairs. +/// +/// The `KeyConverter` is used to convert the key of the table into the primary key of the metadata table. 
+pub struct Sparse { + _marker: + core::marker::PhantomData<(KeyCodec, ValueCodec, Metadata, Nodes, KeyConverter)>, +} + +impl + Sparse +where + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + >, +{ + fn insert_into_tree( + storage: &mut S, + key: &K, + key_bytes: &[u8], + value_bytes: &[u8], + ) -> StorageResult<()> + where + K: ?Sized, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate, + KeyConverter: PrimaryKey, + { + let mut storage = StructuredStorage::new(storage); + let primary_key = KeyConverter::primary_key(key); + // Get latest metadata entry for this `primary_key` + let prev_metadata: Cow = storage + .storage::() + .get(primary_key)? + .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + tree.update(MerkleTreeKey::new(key_bytes), value_bytes) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + // Generate new metadata for the updated tree + let root = tree.root(); + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(primary_key, &metadata)?; + Ok(()) + } + + fn remove_from_tree( + storage: &mut S, + key: &K, + key_bytes: &[u8], + ) -> StorageResult<()> + where + K: ?Sized, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate, + KeyConverter: PrimaryKey, + { + let mut storage = StructuredStorage::new(storage); + let primary_key = KeyConverter::primary_key(key); + // Get latest metadata entry for this `primary_key` + let prev_metadata: Option> = + storage.storage::().get(primary_key)?; + + if let Some(prev_metadata) = prev_metadata { + let root = prev_metadata.root; + + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + tree.delete(MerkleTreeKey::new(key_bytes)) + 
.map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let root = tree.root(); + if &root == MerkleTree::::empty_root() { + // The tree is now empty; remove the metadata + storage.storage::().remove(primary_key)?; + } else { + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(primary_key, &metadata)?; + } + } + + Ok(()) + } +} + +impl Blueprint + for Sparse +where + M: Mappable, + S: KeyValueStore, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + >, + KeyConverter: PrimaryKey, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; + + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage.put(key_bytes.as_ref(), column, value.clone())?; + Self::insert_into_tree(storage, key, key_bytes.as_ref(), value.as_ref()) + } + + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + let prev = storage + .replace(key_bytes.as_ref(), column, value.clone())? 
+ .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose()?; + + Self::insert_into_tree(storage, key, key_bytes.as_ref(), value.as_ref())?; + Ok(prev) + } + + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let prev = storage + .take(key_bytes.as_ref(), column)? + .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose()?; + Self::remove_from_tree(storage, key, key_bytes.as_ref())?; + Ok(prev) + } + + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.delete(key_bytes.as_ref(), column)?; + Self::remove_from_tree(storage, key, key_bytes.as_ref()) + } +} + +impl + MerkleRootStorage for StructuredStorage +where + S: KeyValueStore, + M: Mappable + + TableWithBlueprint< + Blueprint = Sparse, + >, + Self: StorageMutate + + StorageInspect, + Metadata: Mappable, + Metadata::Key: Sized, +{ + fn root(&self, key: &Metadata::Key) -> StorageResult { + use crate::StorageAsRef; + let metadata: Option> = + self.storage_as_ref::().get(key)?; + let root = metadata + .map(|metadata| metadata.root) + .unwrap_or_else(|| in_memory::MerkleTree::new().root()); + Ok(root) + } +} + +type NodeKeyCodec = + <::Blueprint as Blueprint>::KeyCodec; +type NodeValueCodec = + <::Blueprint as Blueprint>::ValueCodec; + +impl SupportsBatching + for Sparse +where + S: BatchOperations, + M: Mappable + + TableWithBlueprint< + Blueprint = Sparse, + >, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + > + TableWithBlueprint, + KeyConverter: PrimaryKey, + Nodes::Blueprint: Blueprint, + for<'a> StructuredStorage<&'a mut S>: StorageMutate 
+ + StorageMutate + + StorageMutate, +{ + fn init<'a, Iter>(storage: &mut S, column: S::Column, set: Iter) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + let mut set = set.peekable(); + + let primary_key; + if let Some((key, _)) = set.peek() { + primary_key = KeyConverter::primary_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + + if storage.storage::().contains_key(primary_key)? { + return Err(anyhow::anyhow!( + "The {} is already initialized", + M::column().name() + ) + .into()) + } + + let encoded_set = set + .map(|(key, value)| { + let key = KeyCodec::encode(key).as_bytes().into_owned(); + let value = ValueCodec::encode(value).as_bytes().into_owned(); + (key, value) + }) + .collect_vec(); + + let (root, nodes) = in_memory::MerkleTree::nodes_from_set( + encoded_set + .iter() + .map(|(key, value)| (MerkleTreeKey::new(key), value)), + ); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + )?; + + let mut nodes = nodes.iter().map(|(key, value)| { + let key = NodeKeyCodec::::encode(key) + .as_bytes() + .into_owned(); + let value = NodeValueCodec::::encode_as_value(value); + (key, Nodes::column(), WriteOperation::Insert(value)) + }); + storage.as_mut().batch_write(&mut nodes)?; + + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(primary_key, &metadata)?; + + Ok(()) + } + + fn insert<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + let mut set = set.peekable(); + + let primary_key; + if let Some((key, _)) = set.peek() { + primary_key = KeyConverter::primary_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + let prev_metadata: Cow = storage + .storage::() + .get(primary_key)? 
+ .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let encoded_set = set + .map(|(key, value)| { + let key = KeyCodec::encode(key).as_bytes().into_owned(); + let value = ValueCodec::encode(value).as_bytes().into_owned(); + (key, value) + }) + .collect_vec(); + + for (key_bytes, value_bytes) in encoded_set.iter() { + tree.update(MerkleTreeKey::new(key_bytes), value_bytes) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + } + let root = tree.root(); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + )?; + + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(primary_key, &metadata)?; + + Ok(()) + } + + fn remove<'a, Iter>( + storage: &mut S, + column: S::Column, + set: Iter, + ) -> StorageResult<()> + where + Iter: 'a + Iterator, + M::Key: 'a, + { + let mut set = set.peekable(); + + let primary_key; + if let Some(key) = set.peek() { + primary_key = KeyConverter::primary_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + let prev_metadata: Cow = storage + .storage::() + .get(primary_key)? 
+ .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let encoded_set = set + .map(|key| KeyCodec::encode(key).as_bytes().into_owned()) + .collect_vec(); + + for key_bytes in encoded_set.iter() { + tree.delete(MerkleTreeKey::new(key_bytes)) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + } + let root = tree.root(); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|key| (key, column, WriteOperation::Remove)), + )?; + + if &root == MerkleTree::::empty_root() { + // The tree is now empty; remove the metadata + storage.storage::().remove(primary_key)?; + } else { + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(primary_key, &metadata)?; + } + + Ok(()) + } +} diff --git a/crates/storage/src/codec.rs b/crates/storage/src/codec.rs new file mode 100644 index 0000000000..baf6a7ee7a --- /dev/null +++ b/crates/storage/src/codec.rs @@ -0,0 +1,65 @@ +//! The module contains the traits for encoding and decoding the types(a.k.a Codec). +//! It implements common codecs and encoders, but it is always possible to define own codecs. + +use crate::kv_store::Value; +use std::{ + borrow::Cow, + ops::Deref, +}; + +pub mod manual; +pub mod postcard; +pub mod primitive; +pub mod raw; + +/// The trait is usually implemented by the encoder that stores serialized objects. +pub trait Encoder { + /// Returns the serialized object as a slice. + fn as_bytes(&self) -> Cow<[u8]>; +} + +/// The trait encodes the type to the bytes and passes it to the `Encoder`, +/// which stores it and provides a reference to it. That allows gives more +/// flexibility and more performant encoding, allowing the use of slices and arrays +/// instead of vectors in some cases. 
Since the [`Encoder`] returns `Cow<[u8]>`, +/// it is always possible to take ownership of the serialized value. +pub trait Encode { + /// The encoder type that stores serialized object. + type Encoder<'a>: Encoder + where + T: 'a; + + /// Encodes the object to the bytes and passes it to the `Encoder`. + fn encode(t: &T) -> Self::Encoder<'_>; + + /// Returns the serialized object as an [`Value`]. + fn encode_as_value(t: &T) -> Value { + Value::new(Self::encode(t).as_bytes().into_owned()) + } +} + +/// The trait decodes the type from the bytes. +pub trait Decode { + /// Decodes the type `T` from the bytes. + fn decode(bytes: &[u8]) -> anyhow::Result; + + /// Decodes the type `T` from the [`Value`]. + fn decode_from_value(value: Value) -> anyhow::Result { + Self::decode(value.deref()) + } +} + +impl<'a> Encoder for Cow<'a, [u8]> { + fn as_bytes(&self) -> Cow<[u8]> { + match self { + Cow::Borrowed(borrowed) => Cow::Borrowed(borrowed), + Cow::Owned(owned) => Cow::Borrowed(owned.as_ref()), + } + } +} + +impl Encoder for [u8; SIZE] { + fn as_bytes(&self) -> Cow<[u8]> { + Cow::Borrowed(self.as_slice()) + } +} diff --git a/crates/storage/src/codec/manual.rs b/crates/storage/src/codec/manual.rs new file mode 100644 index 0000000000..34a93566cd --- /dev/null +++ b/crates/storage/src/codec/manual.rs @@ -0,0 +1,50 @@ +//! The module contains the implementation of the `Manual` codec. +//! The codec allows the definition of manual implementation for specific +//! types that don't follow any patterns from other codecs. Anyone can implement +//! a codec like that, and it's more of an example of how it can be done for foreign types. + +use crate::codec::{ + Decode, + Encode, +}; +use fuel_core_types::fuel_vm::{ + ContractsAssetKey, + ContractsStateKey, +}; +use std::borrow::Cow; + +/// The codec allows the definition of manual implementation for specific type `T`. 
+pub struct Manual(core::marker::PhantomData); + +// TODO: Use `Raw` instead of `Manual` for `ContractsAssetKey`, `ContractsStateKey`, and `OwnedMessageKey` +// when `double_key` macro will generate `TryFrom<&[u8]>` implementation. + +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &ContractsAssetKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + ContractsAssetKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &ContractsStateKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + ContractsStateKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} diff --git a/crates/storage/src/codec/postcard.rs b/crates/storage/src/codec/postcard.rs new file mode 100644 index 0000000000..4b1284afcf --- /dev/null +++ b/crates/storage/src/codec/postcard.rs @@ -0,0 +1,36 @@ +//! The module contains the implementation of the `Postcard` codec. +//! Any type that implements `serde::Serialize` and `serde::Deserialize` +//! can use the `Postcard` codec to be encoded/decoded into/from bytes. +//! The `serde` serialization and deserialization add their own overhead, +//! so this codec shouldn't be used for simple types. + +use crate::codec::{ + Decode, + Encode, +}; +use std::borrow::Cow; + +/// The codec is used to serialized/deserialized types that supports `serde::Serialize` and `serde::Deserialize`. 
+pub struct Postcard; + +impl Encode for Postcard +where + T: ?Sized + serde::Serialize, +{ + type Encoder<'a> = Cow<'a, [u8]> where T: 'a; + + fn encode(value: &T) -> Self::Encoder<'_> { + Cow::Owned(postcard::to_allocvec(value).expect( + "It should be impossible to fail unless serialization is not implemented, which is not true for our types.", + )) + } +} + +impl Decode for Postcard +where + T: serde::de::DeserializeOwned, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(postcard::from_bytes(bytes)?) + } +} diff --git a/crates/storage/src/codec/primitive.rs b/crates/storage/src/codec/primitive.rs new file mode 100644 index 0000000000..4f39ddb982 --- /dev/null +++ b/crates/storage/src/codec/primitive.rs @@ -0,0 +1,100 @@ +//! The module contains the implementation of the `Postcard` codec. +//! The codec is used for types that can be represented by an array. +//! It includes all primitive types and types that are arrays inside +//! or could be represented by arrays. + +use crate::codec::{ + Decode, + Encode, +}; +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_tx::{ + TxId, + UtxoId, + }, + fuel_types::BlockHeight, +}; + +/// The codec is used for types that can be represented by an array. +/// The `SIZE` const specifies the size of the array used to represent the type. +pub struct Primitive; + +macro_rules! impl_encode { + ($($ty:ty, $size:expr),*) => { + $( + impl Encode<$ty> for Primitive<{ $size }> { + type Encoder<'a> = [u8; { $size }]; + + fn encode(t: &$ty) -> Self::Encoder<'_> { + t.to_be_bytes() + } + } + )* + }; +} +macro_rules! impl_decode { + ($($ty:ty, $size:expr),*) => { + $( + impl Decode<$ty> for Primitive<{ $size }> { + fn decode(bytes: &[u8]) -> anyhow::Result<$ty> { + Ok(<$ty>::from_be_bytes(<[u8; { $size }]>::try_from(bytes)?)) + } + } + )* + }; +} + +impl_encode! { + u8, 1, + u16, 2, + u32, 4, + BlockHeight, 4, + DaBlockHeight, 8, + u64, 8, + u128, 16 +} + +impl_decode! 
{ + u8, 1, + u16, 2, + u32, 4, + u64, 8, + u128, 16 +} + +impl Decode for Primitive<4> { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(BlockHeight::from(<[u8; 4]>::try_from(bytes)?)) + } +} + +impl Decode for Primitive<8> { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(DaBlockHeight::from(<[u8; 8]>::try_from(bytes)?)) + } +} + +/// Converts the `UtxoId` into an array of bytes. +pub fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { + let mut default = [0; TxId::LEN + 1]; + default[0..TxId::LEN].copy_from_slice(utxo_id.tx_id().as_ref()); + default[TxId::LEN] = utxo_id.output_index(); + default +} + +impl Encode for Primitive<{ TxId::LEN + 1 }> { + type Encoder<'a> = [u8; TxId::LEN + 1]; + + fn encode(t: &UtxoId) -> Self::Encoder<'_> { + utxo_id_to_bytes(t) + } +} + +impl Decode for Primitive<{ TxId::LEN + 1 }> { + fn decode(bytes: &[u8]) -> anyhow::Result { + let bytes = <[u8; TxId::LEN + 1]>::try_from(bytes)?; + let tx_id: [u8; TxId::LEN] = bytes[0..TxId::LEN].try_into()?; + Ok(UtxoId::new(TxId::from(tx_id), bytes[TxId::LEN])) + } +} diff --git a/crates/storage/src/codec/raw.rs b/crates/storage/src/codec/raw.rs new file mode 100644 index 0000000000..fba697c2ae --- /dev/null +++ b/crates/storage/src/codec/raw.rs @@ -0,0 +1,32 @@ +//! The module contains the implementation of the `Raw` codec. +//! The codec is used for types that are already represented by bytes +//! and can be deserialized into bytes-based objects. + +use crate::codec::{ + Decode, + Encode, +}; +use std::borrow::Cow; + +/// The codec is used for types that are already represented by bytes. 
+pub struct Raw; + +impl Encode for Raw +where + T: ?Sized + AsRef<[u8]>, +{ + type Encoder<'a> = Cow<'a, [u8]> where T: 'a; + + fn encode(t: &T) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Raw +where + for<'a> T: TryFrom<&'a [u8]>, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + T::try_from(bytes).map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs new file mode 100644 index 0000000000..45d4cbc11e --- /dev/null +++ b/crates/storage/src/column.rs @@ -0,0 +1,191 @@ +//! The module defines the `Column` and default tables used by the current `fuel-core` codebase. +//! In the future, the `Column` enum should contain only the required tables for the execution. +//! All other tables should live in the downstream creates in the place where they are really used. + +use crate::kv_store::StorageColumn; + +/// Helper macro to generate the `Column` enum and its implementation for `as_u32` method. +macro_rules! column_definition { + ($(#[$meta:meta])* $vis:vis enum $name:ident { + $(#[$complex_meta:meta])* $complex_variants:ident($body:ident), + $($(#[$const_meta:meta])* $const_variants:ident = $const_number:expr,)* + }) => { + $(#[$meta])* + $vis enum $name { + $($(#[$const_meta])* $const_variants = $const_number,)* + $(#[$complex_meta])* $complex_variants($body), + } + + impl $name { + /// Returns the `u32` representation of the `Self`. + pub fn as_u32(&self) -> u32 { + match self { + $($name::$const_variants => $const_number,)* + $name::$complex_variants(foreign) => foreign.id, + } + } + } + } +} + +column_definition! { + /// Database tables column ids to the corresponding [`crate::Mappable`] table. + #[repr(u32)] + #[derive( + Copy, + Clone, + Debug, + strum_macros::EnumCount, + strum_macros::IntoStaticStr, + PartialEq, + Eq, + enum_iterator::Sequence, + Hash, + )] + pub enum Column { + /// The foreign column is not related to the required tables. 
+ ForeignColumn(ForeignColumn), + + // Tables that are required for the state transition and fraud proving. + + /// See [`ContractsRawCode`](crate::tables::ContractsRawCode) + ContractsRawCode = 0, + /// See [`ContractsInfo`](crate::tables::ContractsInfo) + ContractsInfo = 1, + /// See [`ContractsState`](crate::tables::ContractsState) + ContractsState = 2, + /// See [`ContractsLatestUtxo`](crate::tables::ContractsLatestUtxo) + ContractsLatestUtxo = 3, + /// See [`ContractsAssets`](crate::tables::ContractsAssets) + ContractsAssets = 4, + /// See [`Coins`](crate::tables::Coins) + Coins = 5, + /// See [`Transactions`](crate::tables::Transactions) + Transactions = 6, + /// See [`FuelBlocks`](crate::tables::FuelBlocks) + FuelBlocks = 7, + /// See [`FuelBlockMerkleData`](crate::tables::merkle::FuelBlockMerkleData) + FuelBlockMerkleData = 8, + /// See [`FuelBlockMerkleMetadata`](crate::tables::merkle::FuelBlockMerkleMetadata) + FuelBlockMerkleMetadata = 9, + /// Messages that have been spent. + /// Existence of a key in this column means that the message has been spent. + /// See [`SpentMessages`](crate::tables::SpentMessages) + SpentMessages = 10, + /// See [`ContractsAssetsMerkleData`](crate::tables::merkle::ContractsAssetsMerkleData) + ContractsAssetsMerkleData = 11, + /// See [`ContractsAssetsMerkleMetadata`](crate::tables::merkle::ContractsAssetsMerkleMetadata) + ContractsAssetsMerkleMetadata = 12, + /// See [`ContractsStateMerkleData`](crate::tables::merkle::ContractsStateMerkleData) + ContractsStateMerkleData = 13, + /// See [`ContractsStateMerkleMetadata`](crate::tables::merkle::ContractsStateMerkleMetadata) + ContractsStateMerkleMetadata = 14, + /// See [`Messages`](crate::tables::Messages) + Messages = 15, + /// See [`ProcessedTransactions`](crate::tables::ProcessedTransactions) + ProcessedTransactions = 16, + + // TODO: Extract the columns below into a separate enum to not mix + // required columns and non-required columns. 
It will break `MemoryStore` + // and `MemoryTransactionView` because they rely on linear index incrementation. + + // Below are the tables used for p2p, block production, starting the node. + + /// The column id of metadata about the blockchain + Metadata = 17, + /// See [`Receipts`](crate::tables::Receipts) + Receipts = 18, + /// See `FuelBlockSecondaryKeyBlockHeights` + FuelBlockSecondaryKeyBlockHeights = 19, + /// See [`SealedBlockConsensus`](crate::tables::SealedBlockConsensus) + FuelBlockConsensus = 20, + /// Metadata for the relayer + /// See `RelayerMetadata` + RelayerMetadata = 21, + + // Below are not required tables. They are used for API and may be removed or moved to another place in the future. + + /// The column of the table that stores `true` if `owner` owns `Coin` with `coin_id` + OwnedCoins = 22, + /// Transaction id to current status + TransactionStatus = 23, + /// The column of the table of all `owner`'s transactions + TransactionsByOwnerBlockIdx = 24, + /// The column of the table that stores `true` if `owner` owns `Message` with `message_id` + OwnedMessageIds = 25, + } +} + +impl Column { + /// The total count of variants in the enum. + pub const COUNT: usize = ::COUNT; + + /// Returns the `usize` representation of the `Column`. + pub fn as_usize(&self) -> usize { + self.as_u32() as usize + } +} + +impl StorageColumn for Column { + fn name(&self) -> &'static str { + match self { + Column::ForeignColumn(foreign) => foreign.name, + variant => variant.into(), + } + } + + fn id(&self) -> u32 { + self.as_u32() + } +} + +/// The foreign column is not related to the required tables. +/// It can be used to extend the database with additional tables. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct ForeignColumn { + id: u32, + name: &'static str, +} + +impl ForeignColumn { + /// Creates the foreign column ensuring that the id and name + /// are not already used by the [`Column`] required tables. 
+ pub fn new(id: u32, name: &'static str) -> anyhow::Result { + for column in enum_iterator::all::() { + if column.id() == id { + anyhow::bail!("Column id {} is already used by {}", id, column.name()); + } + if column.name() == name { + anyhow::bail!( + "Column name {} is already used by {}", + name, + column.name() + ); + } + } + Ok(Self { id, name }) + } +} + +/// It is required to implement iteration over the variants of the enum. +/// The `ForeignColumn` is not iterable, so we implement the `Sequence` trait +/// to do nothing. +impl enum_iterator::Sequence for ForeignColumn { + const CARDINALITY: usize = 0; + + fn next(&self) -> Option { + None + } + + fn previous(&self) -> Option { + None + } + + fn first() -> Option { + None + } + + fn last() -> Option { + None + } +} diff --git a/crates/storage/src/kv_store.rs b/crates/storage/src/kv_store.rs index 430d50f426..5d6154684d 100644 --- a/crates/storage/src/kv_store.rs +++ b/crates/storage/src/kv_store.rs @@ -20,7 +20,10 @@ pub trait StorageColumn: Clone { fn id(&self) -> u32; } +// TODO: Use `&mut self` for all mutable methods. +// It requires refactoring of all services because right now, most of them work with `&self` storage. /// The definition of the key-value store. +#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] pub trait KeyValueStore { /// The type of the column. type Column: StorageColumn; @@ -107,12 +110,15 @@ pub enum WriteOperation { } /// The definition of the key-value store with batch operations. +#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] pub trait BatchOperations: KeyValueStore { /// Writes the batch of the entries into the storage. + // TODO: Replace `dyn Iterator` with a generic iterator when `Database` will not use `dyn BatchOperations`. fn batch_write( &self, entries: &mut dyn Iterator, Self::Column, WriteOperation)>, ) -> StorageResult<()> { + // TODO: Optimize implementation for in-memory storages. 
for (key, column, op) in entries { match op { WriteOperation::Insert(value) => { diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index e6a345a1ce..9a6d6ba832 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -21,8 +21,12 @@ pub use fuel_vm_private::{ }, }; +pub mod blueprint; +pub mod codec; +pub mod column; pub mod iter; pub mod kv_store; +pub mod structured_storage; pub mod tables; #[cfg(feature = "test-helpers")] pub mod test_helpers; @@ -33,6 +37,11 @@ pub use fuel_vm_private::storage::{ ContractsAssetKey, ContractsStateKey, }; +#[doc(hidden)] +pub use paste; +#[cfg(feature = "test-helpers")] +#[doc(hidden)] +pub use rand; /// The storage result alias. pub type Result = core::result::Result; @@ -42,8 +51,8 @@ pub type Result = core::result::Result; /// Error occurring during interaction with storage pub enum Error { /// Error occurred during serialization or deserialization of the entity. - #[display(fmt = "error performing serialization or deserialization")] - Codec, + #[display(fmt = "error performing serialization or deserialization `{_0}`")] + Codec(anyhow::Error), /// Error occurred during interaction with database. #[display(fmt = "error occurred in the underlying datastore `{_0:?}`")] DatabaseError(Box), @@ -107,6 +116,35 @@ impl IsNotFound for Result { } } +/// The trait allows working with the storage in batches. +/// Some implementations can perform batch operations faster than one by one. +pub trait StorageBatchMutate: StorageMutate { + /// Initialize the storage with batch insertion. This method is more performant than + /// [`Self::insert_batch`] in some cases. + /// + /// # Errors + /// + /// Returns an error if the storage is already initialized. + fn init_storage<'a, Iter>(&mut self, set: Iter) -> Result<()> + where + Iter: 'a + Iterator, + Type::Key: 'a, + Type::Value: 'a; + + /// Inserts the key-value pair into the storage in batch. 
+ fn insert_batch<'a, Iter>(&mut self, set: Iter) -> Result<()> + where + Iter: 'a + Iterator, + Type::Key: 'a, + Type::Value: 'a; + + /// Removes the key-value pairs from the storage in batch. + fn remove_batch<'a, Iter>(&mut self, set: Iter) -> Result<()> + where + Iter: 'a + Iterator, + Type::Key: 'a; +} + /// Creates `StorageError::NotFound` error with file and line information inside. /// /// # Examples diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs new file mode 100644 index 0000000000..04076644ce --- /dev/null +++ b/crates/storage/src/structured_storage.rs @@ -0,0 +1,663 @@ +//! The module contains the [`StructuredStorage`] wrapper around the key-value storage +//! that implements the storage traits for the tables with blueprint. + +use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + }, + Error as StorageError, + Mappable, + StorageBatchMutate, + StorageInspect, + StorageMutate, + StorageSize, +}; +use std::borrow::Cow; + +pub mod balances; +pub mod blocks; +pub mod coins; +pub mod contracts; +pub mod merkle_data; +pub mod messages; +pub mod receipts; +pub mod sealed_block; +pub mod state; +pub mod transactions; + +/// The table can implement this trait to indicate that it has a blueprint. +/// It inherits the default implementation of the storage traits through the [`StructuredStorage`] +/// for the table. +pub trait TableWithBlueprint: Mappable + Sized { + /// The type of the blueprint used by the table. + type Blueprint; + + /// The column occupied by the table. + fn column() -> Column; +} + +/// The wrapper around the key-value storage that implements the storage traits for the tables +/// with blueprint. +#[derive(Clone, Debug)] +pub struct StructuredStorage { + pub(crate) storage: S, +} + +impl StructuredStorage { + /// Creates a new instance of the structured storage. 
+ pub fn new(storage: S) -> Self { + Self { storage } + } +} + +impl AsRef for StructuredStorage { + fn as_ref(&self) -> &S { + &self.storage + } +} + +impl AsMut for StructuredStorage { + fn as_mut(&mut self) -> &mut S { + &mut self.storage + } +} + +impl StorageInspect for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, +{ + type Error = StorageError; + + fn get(&self, key: &M::Key) -> Result>, Self::Error> { + ::Blueprint::get(&self.storage, key, M::column()) + .map(|value| value.map(Cow::Owned)) + } + + fn contains_key(&self, key: &M::Key) -> Result { + ::Blueprint::exists(&self.storage, key, M::column()) + } +} + +impl StorageMutate for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, +{ + fn insert( + &mut self, + key: &M::Key, + value: &M::Value, + ) -> Result, Self::Error> { + ::Blueprint::replace( + &mut self.storage, + key, + M::column(), + value, + ) + } + + fn remove(&mut self, key: &M::Key) -> Result, Self::Error> { + ::Blueprint::take(&mut self.storage, key, M::column()) + } +} + +impl StorageSize for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, +{ + fn size_of_value(&self, key: &M::Key) -> Result, Self::Error> { + ::Blueprint::size_of_value( + &self.storage, + key, + M::column(), + ) + } +} + +impl StorageBatchMutate for StructuredStorage +where + S: BatchOperations, + M: Mappable + TableWithBlueprint, + M::Blueprint: SupportsBatching, +{ + fn init_storage<'a, Iter>(&mut self, set: Iter) -> Result<(), Self::Error> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + ::Blueprint::init(&mut self.storage, M::column(), set) + } + + fn insert_batch<'a, Iter>(&mut self, set: Iter) -> Result<(), Self::Error> + where + Iter: 'a + Iterator, + M::Key: 'a, + M::Value: 'a, + { + ::Blueprint::insert(&mut self.storage, M::column(), set) + } + + fn remove_batch<'a, 
Iter>(&mut self, set: Iter) -> Result<(), Self::Error> + where + Iter: 'a + Iterator, + M::Key: 'a, + { + ::Blueprint::remove(&mut self.storage, M::column(), set) + } +} + +/// The module that provides helper macros for testing the structured storage. +#[cfg(feature = "test-helpers")] +pub mod test { + use crate as fuel_core_storage; + use fuel_core_storage::{ + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + Value, + }, + Result as StorageResult, + }; + use std::{ + cell::RefCell, + collections::HashMap, + }; + + type Storage = RefCell), Vec>>; + + /// The in-memory storage for testing purposes. + #[derive(Default, Debug, PartialEq, Eq)] + pub struct InMemoryStorage { + storage: Storage, + } + + impl KeyValueStore for InMemoryStorage { + type Column = Column; + + fn write( + &self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + let write = buf.len(); + self.storage + .borrow_mut() + .insert((column, key.to_vec()), buf.to_vec()); + Ok(write) + } + + fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { + self.storage.borrow_mut().remove(&(column, key.to_vec())); + Ok(()) + } + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + Ok(self + .storage + .borrow_mut() + .get(&(column, key.to_vec())) + .map(|v| v.clone().into())) + } + } + + impl BatchOperations for InMemoryStorage {} + + /// The macro that generates basic storage tests for the table with [`InMemoryStorage`]. + #[macro_export] + macro_rules! basic_storage_tests { + ($table:ident, $key:expr, $value_insert:expr, $value_return:expr, $random_key:expr) => { + $crate::paste::item! 
{ + #[cfg(test)] + #[allow(unused_imports)] + mod [< $table:snake _basic_tests >] { + use super::*; + use $crate::{ + structured_storage::{ + test::InMemoryStorage, + StructuredStorage, + }, + StorageAsMut, + }; + use $crate::StorageInspect; + use $crate::StorageMutate; + use $crate::rand; + + #[allow(dead_code)] + fn random(rng: &mut R) -> T + where + rand::distributions::Standard: rand::distributions::Distribution, + R: rand::Rng, + { + use rand::Rng; + rng.gen() + } + + #[test] + fn get() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + assert_eq!( + structured_storage + .storage_as_mut::<$table>() + .get(&key) + .expect("Should get without errors") + .expect("Should not be empty") + .into_owned(), + $value_return + ); + } + + #[test] + fn insert() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + let returned = structured_storage + .storage_as_mut::<$table>() + .get(&key) + .unwrap() + .unwrap() + .into_owned(); + assert_eq!(returned, $value_return); + } + + #[test] + fn remove() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + + assert!(!structured_storage + .storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + } + + #[test] + fn exists() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + // Given + assert!(!structured_storage + 
.storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + + // When + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + // Then + assert!(structured_storage + .storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + } + + #[test] + fn exists_false_after_removing() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + // Given + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + // When + structured_storage + .storage_as_mut::<$table>() + .remove(&key) + .unwrap(); + + // Then + assert!(!structured_storage + .storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + } + + #[test] + fn batch_mutate_works() { + use $crate::rand::{ + Rng, + rngs::StdRng, + RngCore, + SeedableRng, + }; + + let empty_storage = InMemoryStorage::default(); + + let mut init_storage = InMemoryStorage::default(); + let mut init_structured_storage = StructuredStorage::new(&mut init_storage); + + let mut rng = &mut StdRng::seed_from_u64(1234); + let gen = || Some($random_key(&mut rng)); + let data = core::iter::from_fn(gen).take(5_000).collect::>(); + let value = $value_insert; + + <_ as $crate::StorageBatchMutate<$table>>::init_storage( + &mut init_structured_storage, + &mut data.iter().map(|k| { + let value: &<$table as $crate::Mappable>::Value = &value; + (k, value) + }) + ).expect("Should initialize the storage successfully"); + + let mut insert_storage = InMemoryStorage::default(); + let mut insert_structured_storage = StructuredStorage::new(&mut insert_storage); + + <_ as $crate::StorageBatchMutate<$table>>::insert_batch( + &mut insert_structured_storage, + &mut data.iter().map(|k| { + let value: &<$table as $crate::Mappable>::Value = &value; + (k, value) + }) + ).expect("Should insert batch successfully"); + + assert_eq!(init_storage, insert_storage); + assert_ne!(init_storage, 
empty_storage); + assert_ne!(insert_storage, empty_storage); + + let mut remove_from_insert_structured_storage = StructuredStorage::new(&mut insert_storage); + <_ as $crate::StorageBatchMutate<$table>>::remove_batch( + &mut remove_from_insert_structured_storage, + &mut data.iter() + ).expect("Should remove all entries successfully from insert storage"); + assert_ne!(init_storage, insert_storage); + assert_eq!(insert_storage, empty_storage); + + let mut remove_from_init_structured_storage = StructuredStorage::new(&mut init_storage); + <_ as $crate::StorageBatchMutate<$table>>::remove_batch( + &mut remove_from_init_structured_storage, + &mut data.iter() + ).expect("Should remove all entries successfully from init storage"); + assert_eq!(init_storage, insert_storage); + assert_eq!(init_storage, empty_storage); + } + }} + }; + ($table:ident, $key:expr, $value_insert:expr, $value_return:expr) => { + $crate::basic_storage_tests!($table, $key, $value_insert, $value_return, random); + }; + ($table:ident, $key:expr, $value:expr) => { + $crate::basic_storage_tests!($table, $key, $value, $value); + }; + } + + /// The macro that generates SMT storage tests for the table with [`InMemoryStorage`]. + #[macro_export] + macro_rules! root_storage_tests { + ($table:ident, $metadata_table:ident, $current_key:expr, $foreign_key:expr, $generate_key:ident, $generate_value:ident) => { + paste::item! 
{ + #[cfg(test)] + mod [< $table:snake _root_tests >] { + use super::*; + use $crate::{ + structured_storage::{ + test::InMemoryStorage, + StructuredStorage, + }, + StorageAsMut, + }; + use $crate::rand::{ + rngs::StdRng, + SeedableRng, + }; + + #[test] + fn root() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + let key = $generate_key(&$current_key, rng); + let value = $generate_value(rng); + structured_storage.storage_as_mut::<$table>().insert(&key, &value) + .unwrap(); + + let root = structured_storage.storage_as_mut::<$table>().root(&$current_key); + assert!(root.is_ok()) + } + + #[test] + fn root_returns_empty_root_for_empty_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let empty_root = fuel_core_types::fuel_merkle::sparse::in_memory::MerkleTree::new().root(); + let root = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + assert_eq!(root, empty_root) + } + + #[test] + fn put_updates_the_state_merkle_root_for_the_given_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write the first contract state + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the first Merkle root + let root_1 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Write the second contract state + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the second Merkle root + let root_2 = structured_storage + 
.storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + assert_ne!(root_1, root_2); + } + + #[test] + fn remove_updates_the_state_merkle_root_for_the_given_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Write the first contract state + let first_key = $generate_key(&$current_key, rng); + let first_state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&first_key, &first_state) + .unwrap(); + let root_0 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Write the second contract state + let second_key = $generate_key(&$current_key, rng); + let second_state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&second_key, &second_state) + .unwrap(); + + // Read the first Merkle root + let root_1 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Remove the second contract state + structured_storage.storage_as_mut::<$table>().remove(&second_key).unwrap(); + + // Read the second Merkle root + let root_2 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + assert_ne!(root_1, root_2); + assert_eq!(root_0, root_2); + } + + #[test] + fn updating_foreign_metadata_does_not_affect_the_given_metadata_insertion() { + let given_primary_key = $current_key; + let foreign_primary_key = $foreign_key; + + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + let state_value = $generate_value(rng); + + // Given + let given_key = $generate_key(&given_primary_key, rng); + let foreign_key = $generate_key(&foreign_primary_key, rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&given_key, &state_value) + .unwrap(); + + // When 
+ structured_storage + .storage_as_mut::<$table>() + .insert(&foreign_key, &state_value) + .unwrap(); + structured_storage + .storage_as_mut::<$table>() + .remove(&foreign_key) + .unwrap(); + + // Then + let result = structured_storage + .storage_as_mut::<$table>() + .insert(&given_key, &state_value) + .unwrap(); + + assert!(result.is_some()); + } + + #[test] + fn put_creates_merkle_metadata_when_empty() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Given + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write a contract state + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the Merkle metadata + let metadata = structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap(); + + assert!(metadata.is_some()); + } + + #[test] + fn remove_deletes_merkle_metadata_when_empty() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Given + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write a contract state + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the Merkle metadata + structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap() + .expect("Expected Merkle metadata to be present"); + + // Remove the contract asset + structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + + // Read the Merkle metadata + let metadata = structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap(); + + assert!(metadata.is_none()); + } + }} + }; + } +} diff --git a/crates/storage/src/structured_storage/balances.rs 
b/crates/storage/src/structured_storage/balances.rs new file mode 100644 index 0000000000..2bd9019e9c --- /dev/null +++ b/crates/storage/src/structured_storage/balances.rs @@ -0,0 +1,91 @@ +//! The module contains implementations and tests for the `ContractsAssets` table. + +use crate::{ + blueprint::sparse::{ + PrimaryKey, + Sparse, + }, + codec::{ + manual::Manual, + primitive::Primitive, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::{ + merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + }, + ContractsAssets, + }, + Mappable, +}; +use fuel_core_types::fuel_vm::ContractsAssetKey; + +/// The key convertor used to convert the key from the `ContractsAssets` table +/// to the key of the `ContractsAssetsMerkleMetadata` table. +pub struct KeyConverter; + +impl PrimaryKey for KeyConverter { + type InputKey = ::Key; + type OutputKey = ::Key; + + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey { + key.contract_id() + } +} + +impl TableWithBlueprint for ContractsAssets { + type Blueprint = Sparse< + Manual, + Primitive<8>, + ContractsAssetsMerkleMetadata, + ContractsAssetsMerkleData, + KeyConverter, + >; + + fn column() -> Column { + Column::ContractsAssets + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn generate_key( + primary_key: &::Key, + rng: &mut impl rand::Rng, + ) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(primary_key, &bytes.into()) + } + + fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, + ) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) + } + + crate::basic_storage_tests!( + ContractsAssets, + ::Key::default(), + ::Value::default(), + ::Value::default(), + generate_key_for_same_contract + ); + + fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + rng.gen() + } + + crate::root_storage_tests!( + ContractsAssets, + ContractsAssetsMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + 
generate_key, + generate_value + ); +} diff --git a/crates/storage/src/structured_storage/blocks.rs b/crates/storage/src/structured_storage/blocks.rs new file mode 100644 index 0000000000..f31cbef580 --- /dev/null +++ b/crates/storage/src/structured_storage/blocks.rs @@ -0,0 +1,27 @@ +//! The module contains implementations and tests for the `FuelBlocks` table. + +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::FuelBlocks, +}; + +impl TableWithBlueprint for FuelBlocks { + type Blueprint = Plain; + + fn column() -> Column { + Column::FuelBlocks + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + FuelBlocks, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/coins.rs b/crates/storage/src/structured_storage/coins.rs new file mode 100644 index 0000000000..53d45f6ca6 --- /dev/null +++ b/crates/storage/src/structured_storage/coins.rs @@ -0,0 +1,27 @@ +//! The module contains implementations and tests for the `Coins` table. + +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + primitive::Primitive, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::Coins, +}; + +impl TableWithBlueprint for Coins { + type Blueprint = Plain, Postcard>; + + fn column() -> Column { + Column::Coins + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + Coins, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/contracts.rs b/crates/storage/src/structured_storage/contracts.rs new file mode 100644 index 0000000000..5e935a2f07 --- /dev/null +++ b/crates/storage/src/structured_storage/contracts.rs @@ -0,0 +1,95 @@ +//! The module contains implementations and tests for the contracts tables. 
+ +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + kv_store::KeyValueStore, + structured_storage::{ + StructuredStorage, + TableWithBlueprint, + }, + tables::{ + ContractsInfo, + ContractsLatestUtxo, + ContractsRawCode, + }, + StorageRead, +}; +use core::ops::Deref; +use fuel_core_types::fuel_tx::ContractId; + +// # Dev-note: The value of the `ContractsRawCode` has a unique implementation of serialization +// and deserialization and uses `Raw` codec. Because the value is a contract byte code represented +// by bytes, we don't use `serde::Deserialization` and `serde::Serialization` for `Vec`, +// because we don't need to store the size of the contract. We store/load raw bytes. +impl TableWithBlueprint for ContractsRawCode { + type Blueprint = Plain; + + fn column() -> Column { + Column::ContractsRawCode + } +} + +impl StorageRead for StructuredStorage +where + S: KeyValueStore, +{ + fn read( + &self, + key: &ContractId, + buf: &mut [u8], + ) -> Result, Self::Error> { + self.storage + .read(key.as_ref(), Column::ContractsRawCode, buf) + } + + fn read_alloc(&self, key: &ContractId) -> Result>, Self::Error> { + self.storage + .get(key.as_ref(), Column::ContractsRawCode) + .map(|value| value.map(|value| value.deref().clone())) + } +} + +impl TableWithBlueprint for ContractsInfo { + type Blueprint = Plain; + + fn column() -> Column { + Column::ContractsInfo + } +} + +impl TableWithBlueprint for ContractsLatestUtxo { + type Blueprint = Plain; + + fn column() -> Column { + Column::ContractsLatestUtxo + } +} + +#[cfg(test)] +mod test { + use super::*; + + crate::basic_storage_tests!( + ContractsRawCode, + ::Key::from([1u8; 32]), + vec![32u8], + ::OwnedValue::from(vec![32u8]) + ); + + crate::basic_storage_tests!( + ContractsInfo, + ::Key::from([1u8; 32]), + ([2u8; 32].into(), [3u8; 32].into()) + ); + + crate::basic_storage_tests!( + ContractsLatestUtxo, + ::Key::from([1u8; 32]), + ::Value::default() + ); +} diff 
--git a/crates/storage/src/structured_storage/merkle_data.rs b/crates/storage/src/structured_storage/merkle_data.rs new file mode 100644 index 0000000000..b597be35f8 --- /dev/null +++ b/crates/storage/src/structured_storage/merkle_data.rs @@ -0,0 +1,52 @@ +//! The module contains implementations and tests for merkle related tables. + +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + primitive::Primitive, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, +}; + +macro_rules! merkle_table { + ($table:ident) => { + merkle_table!($table, Raw); + }; + ($table:ident, $key_codec:ident) => { + impl TableWithBlueprint for $table { + type Blueprint = Plain<$key_codec, Postcard>; + + fn column() -> Column { + Column::$table + } + } + + #[cfg(test)] + $crate::basic_storage_tests!( + $table, + <$table as $crate::Mappable>::Key::default(), + <$table as $crate::Mappable>::Value::default() + ); + }; +} + +type U64Codec = Primitive<8>; +type BlockHeightCodec = Primitive<4>; + +merkle_table!(FuelBlockMerkleData, U64Codec); +merkle_table!(FuelBlockMerkleMetadata, BlockHeightCodec); +merkle_table!(ContractsAssetsMerkleData); +merkle_table!(ContractsAssetsMerkleMetadata); +merkle_table!(ContractsStateMerkleData); +merkle_table!(ContractsStateMerkleMetadata); diff --git a/crates/storage/src/structured_storage/messages.rs b/crates/storage/src/structured_storage/messages.rs new file mode 100644 index 0000000000..08addab8ea --- /dev/null +++ b/crates/storage/src/structured_storage/messages.rs @@ -0,0 +1,48 @@ +//! The module contains implementations and tests for the messages tables. 
+ +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::{ + Messages, + SpentMessages, + }, +}; + +impl TableWithBlueprint for Messages { + type Blueprint = Plain; + + fn column() -> Column { + Column::Messages + } +} + +impl TableWithBlueprint for SpentMessages { + type Blueprint = Plain; + + fn column() -> Column { + Column::SpentMessages + } +} + +#[cfg(test)] +mod test { + use super::*; + + crate::basic_storage_tests!( + Messages, + ::Key::default(), + ::Value::default() + ); + + crate::basic_storage_tests!( + SpentMessages, + ::Key::default(), + ::Value::default() + ); +} diff --git a/crates/storage/src/structured_storage/receipts.rs b/crates/storage/src/structured_storage/receipts.rs new file mode 100644 index 0000000000..5e40cd2e4d --- /dev/null +++ b/crates/storage/src/structured_storage/receipts.rs @@ -0,0 +1,32 @@ +//! The module contains implementations and tests for the `Receipts` table. + +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::Receipts, +}; + +impl TableWithBlueprint for Receipts { + type Blueprint = Plain; + + fn column() -> Column { + Column::Receipts + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + Receipts, + ::Key::from([1u8; 32]), + vec![fuel_core_types::fuel_tx::Receipt::ret( + Default::default(), + Default::default(), + Default::default(), + Default::default() + )] +); diff --git a/crates/storage/src/structured_storage/sealed_block.rs b/crates/storage/src/structured_storage/sealed_block.rs new file mode 100644 index 0000000000..c0fb6d8db2 --- /dev/null +++ b/crates/storage/src/structured_storage/sealed_block.rs @@ -0,0 +1,27 @@ +//! The module contains implementations and tests for the `SealedBlockConsensus` table. 
+ +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::SealedBlockConsensus, +}; + +impl TableWithBlueprint for SealedBlockConsensus { + type Blueprint = Plain; + + fn column() -> Column { + Column::FuelBlockConsensus + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + SealedBlockConsensus, + ::Key::from([1u8; 32]), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs new file mode 100644 index 0000000000..c28b8c2a30 --- /dev/null +++ b/crates/storage/src/structured_storage/state.rs @@ -0,0 +1,93 @@ +//! The module contains implementations and tests for the `ContractsState` table. + +use crate::{ + blueprint::sparse::{ + PrimaryKey, + Sparse, + }, + codec::{ + manual::Manual, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::{ + merkle::{ + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + }, + ContractsState, + }, + Mappable, +}; +use fuel_core_types::fuel_vm::ContractsStateKey; + +/// The key convertor used to convert the key from the `ContractsState` table +/// to the key of the `ContractsStateMerkleMetadata` table. 
+pub struct KeyConverter; + +impl PrimaryKey for KeyConverter { + type InputKey = ::Key; + type OutputKey = ::Key; + + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey { + key.contract_id() + } +} + +impl TableWithBlueprint for ContractsState { + type Blueprint = Sparse< + Manual, + Raw, + ContractsStateMerkleMetadata, + ContractsStateMerkleData, + KeyConverter, + >; + + fn column() -> Column { + Column::ContractsState + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn generate_key( + primary_key: &::Key, + rng: &mut impl rand::Rng, + ) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(primary_key, &bytes.into()) + } + + fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, + ) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) + } + + crate::basic_storage_tests!( + ContractsState, + ::Key::default(), + ::Value::zeroed(), + ::Value::zeroed(), + generate_key_for_same_contract + ); + + fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + bytes.into() + } + + crate::root_storage_tests!( + ContractsState, + ContractsStateMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + generate_key, + generate_value + ); +} diff --git a/crates/storage/src/structured_storage/transactions.rs b/crates/storage/src/structured_storage/transactions.rs new file mode 100644 index 0000000000..5605ecdbe1 --- /dev/null +++ b/crates/storage/src/structured_storage/transactions.rs @@ -0,0 +1,45 @@ +//! The module contains implementations and tests for the `Transactions` table. 
+ +use crate::{ + blueprint::plain::Plain, + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structured_storage::TableWithBlueprint, + tables::{ + ProcessedTransactions, + Transactions, + }, +}; + +impl TableWithBlueprint for Transactions { + type Blueprint = Plain; + + fn column() -> Column { + Column::Transactions + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + Transactions, + ::Key::from([1u8; 32]), + ::Value::default() +); + +impl TableWithBlueprint for ProcessedTransactions { + type Blueprint = Plain; + + fn column() -> Column { + Column::ProcessedTransactions + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + ProcessedTransactions, + ::Key::from([1u8; 32]), + ::Value::default() +); diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 27f5cb2fb2..1ec13b0f03 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -40,6 +40,7 @@ impl Mappable for FuelBlocks { /// Unique identifier of the fuel block. type Key = Self::OwnedKey; // TODO: Seems it would be faster to use `BlockHeight` as primary key. + // https://github.com/FuelLabs/fuel-core/issues/1580. type OwnedKey = BlockId; type Value = Self::OwnedValue; type OwnedValue = CompressedBlock; @@ -58,6 +59,7 @@ impl Mappable for ContractsLatestUtxo { type OwnedValue = ContractUtxoInfo; } +// TODO: Move definition to the service that is responsible for its usage. /// Receipts of different hidden internal operations. pub struct Receipts; @@ -79,8 +81,7 @@ impl Mappable for SealedBlockConsensus { type OwnedValue = Consensus; } -/// The storage table of coins. Each -/// [`CompressedCoin`](fuel_core_types::entities::coins::coin::CompressedCoin) +/// The storage table of coins. Each [`CompressedCoin`] /// is represented by unique `UtxoId`. pub struct Coins; @@ -91,7 +92,7 @@ impl Mappable for Coins { type OwnedValue = CompressedCoin; } -/// The storage table of bridged Ethereum [`Message`](crate::model::Message)s. 
+/// The storage table of bridged Ethereum message. pub struct Messages; impl Mappable for Messages { @@ -101,7 +102,7 @@ impl Mappable for Messages { type OwnedValue = Message; } -/// The storage table that indicates if the [`Message`](crate::model::Message) is spent or not. +/// The storage table that indicates if the message is spent or not. pub struct SpentMessages; impl Mappable for SpentMessages { @@ -132,5 +133,115 @@ impl Mappable for ProcessedTransactions { type OwnedValue = (); } -// TODO: Add macro to define all common tables to avoid copy/paste of the code. -// TODO: Add macro to define common unit tests. +/// The module contains definition of merkle-related tables. +pub mod merkle { + use crate::{ + Mappable, + MerkleRoot, + }; + use fuel_core_types::{ + fuel_merkle::{ + binary, + sparse, + }, + fuel_tx::ContractId, + fuel_types::BlockHeight, + }; + + /// Metadata for dense Merkle trees + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] + pub struct DenseMerkleMetadata { + /// The root hash of the dense Merkle tree structure + pub root: MerkleRoot, + /// The version of the dense Merkle tree structure is equal to the number of + /// leaves. Every time we append a new leaf to the Merkle tree data set, we + /// increment the version number. 
+ pub version: u64, + } + + impl Default for DenseMerkleMetadata { + fn default() -> Self { + let empty_merkle_tree = binary::root_calculator::MerkleRootCalculator::new(); + Self { + root: empty_merkle_tree.root(), + version: 0, + } + } + } + + /// Metadata for sparse Merkle trees + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] + pub struct SparseMerkleMetadata { + /// The root hash of the sparse Merkle tree structure + pub root: MerkleRoot, + } + + impl Default for SparseMerkleMetadata { + fn default() -> Self { + let empty_merkle_tree = sparse::in_memory::MerkleTree::new(); + Self { + root: empty_merkle_tree.root(), + } + } + } + + /// The table of BMT data for Fuel blocks. + pub struct FuelBlockMerkleData; + + impl Mappable for FuelBlockMerkleData { + type Key = u64; + type OwnedKey = Self::Key; + type Value = binary::Primitive; + type OwnedValue = Self::Value; + } + + /// The metadata table for [`FuelBlockMerkleData`] table. + pub struct FuelBlockMerkleMetadata; + + impl Mappable for FuelBlockMerkleMetadata { + type Key = BlockHeight; + type OwnedKey = Self::Key; + type Value = DenseMerkleMetadata; + type OwnedValue = Self::Value; + } + + /// The table of SMT data for Contract assets. + pub struct ContractsAssetsMerkleData; + + impl Mappable for ContractsAssetsMerkleData { + type Key = [u8; 32]; + type OwnedKey = Self::Key; + type Value = sparse::Primitive; + type OwnedValue = Self::Value; + } + + /// The metadata table for [`ContractsAssetsMerkleData`] table + pub struct ContractsAssetsMerkleMetadata; + + impl Mappable for ContractsAssetsMerkleMetadata { + type Key = ContractId; + type OwnedKey = Self::Key; + type Value = SparseMerkleMetadata; + type OwnedValue = Self::Value; + } + + /// The table of SMT data for Contract state. 
+    pub struct ContractsStateMerkleData;
+
+    impl Mappable for ContractsStateMerkleData {
+        type Key = [u8; 32];
+        type OwnedKey = Self::Key;
+        type Value = sparse::Primitive;
+        type OwnedValue = Self::Value;
+    }
+
+    /// The metadata table for [`ContractsStateMerkleData`] table
+    pub struct ContractsStateMerkleMetadata;
+
+    impl Mappable for ContractsStateMerkleMetadata {
+        type Key = ContractId;
+        type OwnedKey = Self::Key;
+        type Value = SparseMerkleMetadata;
+        type OwnedValue = Self::Value;
+    }
+}
diff --git a/crates/storage/src/transactional.rs b/crates/storage/src/transactional.rs
index d44041113b..854557bd11 100644
--- a/crates/storage/src/transactional.rs
+++ b/crates/storage/src/transactional.rs
@@ -1,6 +1,7 @@
 //! The primitives to work with storage in transactional mode.
 
 use crate::Result as StorageResult;
+use fuel_core_types::fuel_types::BlockHeight;
 
 #[cfg_attr(feature = "test-helpers", mockall::automock(type Storage = crate::test_helpers::EmptyStorage;))]
 /// The types is transactional and may create `StorageTransaction`.
@@ -75,3 +76,13 @@ impl<Storage> StorageTransaction<Storage> {
         self.transaction.commit()
     }
 }
+
+/// Provides a view of the storage at the given height.
+/// It guarantees to be atomic, meaning the view is immutable to outside modifications.
+pub trait AtomicView<View>: Send + Sync {
+    /// Returns the view of the storage at the given `height`.
+    fn view_at(&self, height: BlockHeight) -> StorageResult<View>;
+
+    /// Returns the view of the storage for the latest block height.
+ fn latest_view(&self) -> View; +} diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 0ad9985942..586408ab50 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -19,8 +19,10 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } bs58 = "0.5" +derivative = { version = "2" } derive_more = { version = "0.99" } fuel-vm-private = { workspace = true, default-features = false, features = ["alloc"] } +rand = { workspace = true, optional = true } secrecy = "0.8" serde = { workspace = true, features = ["derive"], optional = true } tai64 = { version = "4.0", features = ["serde"] } @@ -31,5 +33,5 @@ zeroize = "1.5" default = ["std"] serde = ["dep:serde", "fuel-vm-private/serde"] std = ["fuel-vm-private/std"] -random = ["fuel-vm-private/random"] +random = ["dep:rand", "fuel-vm-private/random"] test-helpers = ["random", "fuel-vm-private/test-helpers"] diff --git a/crates/types/src/blockchain/consensus/poa.rs b/crates/types/src/blockchain/consensus/poa.rs index 92192f43ad..44800fd536 100644 --- a/crates/types/src/blockchain/consensus/poa.rs +++ b/crates/types/src/blockchain/consensus/poa.rs @@ -7,7 +7,7 @@ use crate::fuel_crypto::Signature; /// The consensus related data that doesn't live on the /// header. pub struct PoAConsensus { - /// The signature of the [`FuelBlockHeader`]. + /// The signature of the `FuelBlockHeader`. pub signature: Signature, } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 68497d3f30..284fcebcb4 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -22,7 +22,8 @@ use tai64::Tai64; /// A fuel block header that has all the fields generated because it /// has been executed. 
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, derivative::Derivative)]
+#[derivative(PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct BlockHeader {
     /// The application header.
@@ -32,6 +33,7 @@ pub struct BlockHeader {
     /// The header metadata calculated during creation.
     /// The field is private to enforce the use of the [`PartialBlockHeader::generate`] method.
     #[cfg_attr(feature = "serde", serde(skip))]
+    #[derivative(PartialEq = "ignore")]
     metadata: Option<BlockHeaderMetadata>,
 }
 
@@ -57,7 +59,7 @@ pub struct ApplicationHeader<Generated> {
     /// to have some rules in place to ensure the block number was chosen in a reasonable way. For
     /// example, they should verify that the block number satisfies the finality requirements of the
     /// layer 1 chain. They should also verify that the block number isn't too stale and is increasing.
-    /// Some similar concerns are noted in this issue: https://github.com/FuelLabs/fuel-specs/issues/220
+    /// Some similar concerns are noted in this issue: <https://github.com/FuelLabs/fuel-specs/issues/220>
     pub da_height: DaBlockHeight,
 
     /// Generated application fields.
     pub generated: Generated,
diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs
index 468df2e240..a559407e09 100644
--- a/crates/types/src/blockchain/primitives.rs
+++ b/crates/types/src/blockchain/primitives.rs
@@ -5,6 +5,7 @@ use crate::{
     fuel_crypto::SecretKey,
     fuel_types::Bytes32,
 };
+use core::array::TryFromSliceError;
 use derive_more::{
     Add,
     AsRef,
@@ -76,6 +77,13 @@ impl AsRef<[u8]> for BlockId {
     }
 }
 
+#[cfg(feature = "random")]
+impl rand::distributions::Distribution<BlockId> for rand::distributions::Standard {
+    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> BlockId {
+        BlockId(rng.gen())
+    }
+}
+
 /// Block height of the data availability layer
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 #[derive(
@@ -111,9 +119,20 @@ impl From<u64> for DaBlockHeight {
     }
 }
 
+impl From<[u8; 8]> for DaBlockHeight {
+    fn from(n: [u8; 8]) -> Self {
+        DaBlockHeight(u64::from_be_bytes(n))
+    }
+}
+
 impl DaBlockHeight {
     /// Convert to array of big endian bytes
-    pub fn to_bytes(self) -> [u8; 8] {
+    pub fn to_bytes(&self) -> [u8; 8] {
+        self.to_be_bytes()
+    }
+
+    /// Convert to array of big endian bytes
+    pub fn to_be_bytes(&self) -> [u8; 8] {
         self.0.to_be_bytes()
     }
 
@@ -144,3 +163,11 @@ impl From<[u8; 32]> for BlockId {
         Self(bytes.into())
     }
 }
+
+impl TryFrom<&'_ [u8]> for BlockId {
+    type Error = TryFromSliceError;
+
+    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
+        Ok(Self::from(TryInto::<[u8; 32]>::try_into(bytes)?))
+    }
+}
diff --git a/crates/types/src/entities/coins/coin.rs b/crates/types/src/entities/coins/coin.rs
index b28f6f6504..c22d8cd8e4 100644
--- a/crates/types/src/entities/coins/coin.rs
+++ b/crates/types/src/entities/coins/coin.rs
@@ -53,7 +53,7 @@ impl Coin {
 
 /// The compressed version of the `Coin` with minimum fields required for
 /// the proper work of the blockchain.
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-#[derive(Debug, Clone)]
+#[derive(Default, Debug, Clone, PartialEq, Eq)]
 pub struct CompressedCoin {
     /// The address with permission to spend this coin
     pub owner: Address,
diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs
index 5f7c4743dd..2924c1390c 100644
--- a/crates/types/src/lib.rs
+++ b/crates/types/src/lib.rs
@@ -35,6 +35,7 @@ pub mod fuel_vm {
         checked_transaction,
         consts,
         crypto,
+        double_key,
         error::PredicateVerificationFailed,
         interpreter,
         prelude::{
@@ -54,6 +55,8 @@ pub mod fuel_vm {
         },
         script_with_data_offset,
         state,
+        storage::ContractsAssetKey,
+        storage::ContractsStateKey,
         util,
     };
 }
diff --git a/crates/types/src/services/block_importer.rs b/crates/types/src/services/block_importer.rs
index 494abb8b57..276a305b96 100644
--- a/crates/types/src/services/block_importer.rs
+++ b/crates/types/src/services/block_importer.rs
@@ -10,11 +10,16 @@ use crate::{
         Uncommitted,
     },
 };
+use core::ops::Deref;
+use std::sync::Arc;
 
 /// The uncommitted result of the block importing.
 pub type UncommittedResult<DatabaseTransaction> =
     Uncommitted<ImportResult, DatabaseTransaction>;
 
+/// The alias for the `ImportResult` that can be shared between threads.
+pub type SharedImportResult = Arc<dyn Deref<Target = ImportResult> + Send + Sync>;
+
 /// The result of the block import.
 #[derive(Debug)]
 #[cfg_attr(any(test, feature = "test-helpers"), derive(Default))]
@@ -27,6 +32,14 @@ pub struct ImportResult {
     pub source: Source,
 }
 
+impl Deref for ImportResult {
+    type Target = Self;
+
+    fn deref(&self) -> &Self::Target {
+        self
+    }
+}
+
 /// The source producer of the block.
 #[derive(Debug, Clone, Copy, PartialEq, Default)]
 pub enum Source {
@@ -87,8 +100,8 @@ impl BlockImportInfo {
     }
 }
 
-impl From<&ImportResult> for BlockImportInfo {
-    fn from(result: &ImportResult) -> Self {
+impl From<SharedImportResult> for BlockImportInfo {
+    fn from(result: SharedImportResult) -> Self {
         Self {
             block_header: result.sealed_block.entity.header().clone(),
             source: result.source,
diff --git a/crates/types/src/services/executor.rs b/crates/types/src/services/executor.rs
index 8f48c815e7..f240b31bba 100644
--- a/crates/types/src/services/executor.rs
+++ b/crates/types/src/services/executor.rs
@@ -9,6 +9,7 @@ use crate::{
         primitives::BlockId,
     },
     fuel_tx::{
+        Receipt,
         TxId,
         UtxoId,
         ValidityError,
@@ -53,6 +54,8 @@ pub struct TransactionExecutionStatus {
     pub id: Bytes32,
     /// The result of the executed transaction.
     pub result: TransactionExecutionResult,
+    /// The receipts generated by the executed transaction.
+    pub receipts: Vec<Receipt>,
 }
 
 /// The result of transaction execution.
@@ -85,11 +88,11 @@ pub enum ExecutionTypes<P, V> {
 }
 
 /// Starting point for executing a block. Production starts with a [`PartialFuelBlock`].
-/// Validation starts with a full [`FuelBlock`].
+/// Validation starts with a full `FuelBlock`.
 pub type ExecutionBlock = ExecutionTypes<PartialFuelBlock, Block>;
 
 impl<P> ExecutionTypes<P, Block> {
-    /// Get the hash of the full [`FuelBlock`] if validating.
+    /// Get the hash of the full `FuelBlock` if validating.
     pub fn id(&self) -> Option<BlockId> {
         match self {
             ExecutionTypes::DryRun(_) => None,
diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs
index 6907ba8e0e..8cd98385c0 100644
--- a/crates/types/src/services/p2p.rs
+++ b/crates/types/src/services/p2p.rs
@@ -160,7 +160,7 @@ impl FromStr for PeerId {
 
 impl PeerId {
     /// Bind the PeerId and given data of type T together to generate a
-    /// SourcePeer
+    /// `SourcePeer`
     pub fn bind<T>(self, data: T) -> SourcePeer<T> {
         SourcePeer {
             peer_id: self,
diff --git a/crates/types/src/services/txpool.rs b/crates/types/src/services/txpool.rs
index c323761ec8..4cc483e6c7 100644
--- a/crates/types/src/services/txpool.rs
+++ b/crates/types/src/services/txpool.rs
@@ -1,7 +1,10 @@
 //! Types for interoperability with the txpool service
 
 use crate::{
-    blockchain::primitives::BlockId,
+    blockchain::{
+        block::Block,
+        primitives::BlockId,
+    },
     fuel_asm::Word,
     fuel_tx::{
         field::{
@@ -27,6 +30,7 @@ use crate::{
         checked_transaction::Checked,
         ProgramState,
     },
+    services::executor::TransactionExecutionResult,
 };
 use fuel_vm_private::checked_transaction::CheckedTransaction;
 use std::{
@@ -199,6 +203,30 @@ pub enum TransactionStatus {
     },
 }
 
+/// Converts the transaction execution result to the transaction status.
+pub fn from_executor_to_status(
+    block: &Block,
+    result: TransactionExecutionResult,
+) -> TransactionStatus {
+    let time = block.header().time();
+    let block_id = block.id();
+    match result {
+        TransactionExecutionResult::Success { result } => TransactionStatus::Success {
+            block_id,
+            time,
+            result,
+        },
+        TransactionExecutionResult::Failed { result, reason } => {
+            TransactionStatus::Failed {
+                block_id,
+                time,
+                result,
+                reason: reason.clone(),
+            }
+        }
+    }
+}
+
 #[allow(missing_docs)]
 #[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)]
 #[non_exhaustive]