diff --git a/.gitignore b/.gitignore index 75b10b8482..4f1f1adc1a 100644 --- a/.gitignore +++ b/.gitignore @@ -21,5 +21,6 @@ justfile spawn-and-move-db types-test-db examples/spawn-and-move/manifests/saya/** +**/*.log artifacts/ diff --git a/Cargo.lock b/Cargo.lock index 7d34b34a0c..6bcacf1931 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,7 +166,7 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-consensus 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-contract 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -211,13 +211,14 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-eips 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", "alloy-rlp", "alloy-serde 0.3.6 (git+https://github.com/alloy-rs/alloy)", "c-kzg", + "derive_more 1.0.0", "serde", ] @@ -244,7 +245,7 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -332,7 +333,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -349,7 +350,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-primitives", "alloy-serde 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -385,7 +386,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -419,7 +420,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-consensus 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -451,7 +452,7 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-eips 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-primitives", @@ -462,7 +463,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.3.6" -source = 
"git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -534,7 +535,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-chains", "alloy-consensus 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -613,7 +614,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-json-rpc 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-transport 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -633,7 +634,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-primitives", "alloy-serde 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -664,7 +665,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-consensus 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-eips 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -695,7 +696,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-primitives", "serde", @@ -719,7 +720,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-primitives", "async-trait", @@ -732,7 +733,7 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-consensus 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-network 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -839,7 +840,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ "alloy-json-rpc 0.3.6 (git+https://github.com/alloy-rs/alloy)", "base64 0.22.1", @@ -872,7 +873,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.3.6" -source = "git+https://github.com/alloy-rs/alloy#57dd4c538293421c3d1a793cba79ad6f46d6444f" +source = "git+https://github.com/alloy-rs/alloy#04b1e0984b8661ef910b0ae88e1ef218db66b636" dependencies = [ 
"alloy-json-rpc 0.3.6 (git+https://github.com/alloy-rs/alloy)", "alloy-transport 0.3.6 (git+https://github.com/alloy-rs/alloy)", @@ -12795,9 +12796,9 @@ dependencies = [ [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "similar" @@ -14677,6 +14678,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", + "toml 0.8.19", "tracing", ] @@ -15401,6 +15403,19 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "verify_db_balances" +version = "1.0.0-alpha.14" +dependencies = [ + "clap", + "num-traits 0.2.19", + "sqlx", + "starknet 0.12.0", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "version_check" version = "0.9.5" diff --git a/Cargo.toml b/Cargo.toml index b9b0bb7075..cf9d023051 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ members = [ "crates/torii/server", "crates/torii/types-test", "examples/spawn-and-move", + "scripts/verify_db_balances", "xtask/generate-test-db", ] diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 3e7931cf8f..3d6d748025 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -11,11 +11,14 @@ //! for more info. use std::cmp; +use std::collections::VecDeque; use std::net::SocketAddr; +use std::path::PathBuf; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use anyhow::Context; use clap::{ArgAction, Parser}; use dojo_metrics::{metrics_process, prometheus_exporter}; use dojo_utils::parse::{parse_socket_address, parse_url}; @@ -33,18 +36,10 @@ use tokio::sync::broadcast::Sender; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, IndexingFlags, Processors}; use torii_core::executor::Executor; -use torii_core::processors::event_message::EventMessageProcessor; -use torii_core::processors::generate_event_processors_map; -use torii_core::processors::metadata_update::MetadataUpdateProcessor; -use torii_core::processors::register_model::RegisterModelProcessor; -use torii_core::processors::store_del_record::StoreDelRecordProcessor; -use torii_core::processors::store_set_record::StoreSetRecordProcessor; use torii_core::processors::store_transaction::StoreTransactionProcessor; -use torii_core::processors::store_update_member::StoreUpdateMemberProcessor; -use torii_core::processors::store_update_record::StoreUpdateRecordProcessor; use torii_core::simple_broker::SimpleBroker; use torii_core::sql::Sql; -use torii_core::types::Model; +use torii_core::types::{Contract, ContractType, Model, ToriiConfig}; use torii_server::proxy::Proxy; use tracing::{error, info}; use tracing_subscriber::{fmt, EnvFilter}; @@ -58,7 +53,7 @@ pub(crate) const LOG_TARGET: &str = "torii::cli"; struct Args { /// The world to index #[arg(short, long = "world", env = "DOJO_WORLD_ADDRESS")] - world_address: Felt, + world_address: Option, /// The sequencer rpc endpoint to index. 
#[arg(long, value_name = "URL", default_value = ":5050", value_parser = parse_url)] @@ -69,10 +64,6 @@ struct Args { #[arg(short, long, default_value = "")] database: String, - /// Specify a block to start indexing from, ignored if stored head exists - #[arg(short, long, default_value = "0")] - start_block: u64, - /// Address to serve api endpoints at. #[arg(long, value_name = "SOCKET", default_value = "0.0.0.0:8080", value_parser = parse_socket_address)] addr: SocketAddr, @@ -142,11 +133,35 @@ struct Args { /// Whether or not to index raw events #[arg(long, action = ArgAction::Set, default_value_t = true)] index_raw_events: bool, + + /// ERC contract addresses to index + #[arg(long, value_parser = parse_erc_contracts)] + #[arg(conflicts_with = "config")] + contracts: Option>, + + /// Configuration file + #[arg(long)] + config: Option, } #[tokio::main] async fn main() -> anyhow::Result<()> { let args = Args::parse(); + + let mut config = if let Some(path) = args.config { + ToriiConfig::load_from_path(&path)? + } else { + let mut config = ToriiConfig::default(); + + if let Some(contracts) = args.contracts { + config.contracts = VecDeque::from(contracts); + } + + config + }; + + let world_address = verify_single_world_address(args.world_address, &mut config)?; + let filter_layer = EnvFilter::try_from_default_env() .unwrap_or_else(|_| EnvFilter::new("info,hyper_reverse_proxy=off")); @@ -189,25 +204,19 @@ async fn main() -> anyhow::Result<()> { let provider: Arc<_> = JsonRpcClient::new(HttpTransport::new(args.rpc)).into(); // Get world address - let world = WorldContractReader::new(args.world_address, provider.clone()); + let world = WorldContractReader::new(world_address, provider.clone()); + + let contracts = + config.contracts.iter().map(|contract| (contract.address, contract.r#type)).collect(); let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await?; tokio::spawn(async move { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?; + let db = Sql::new(pool.clone(), sender.clone(), &contracts).await?; let processors = Processors { - event: generate_event_processors_map(vec![ - Arc::new(RegisterModelProcessor), - Arc::new(StoreSetRecordProcessor), - Arc::new(MetadataUpdateProcessor), - Arc::new(StoreDelRecordProcessor), - Arc::new(EventMessageProcessor), - Arc::new(StoreUpdateRecordProcessor), - Arc::new(StoreUpdateMemberProcessor), - ])?, transaction: vec![Box::new(StoreTransactionProcessor)], ..Processors::default() }; @@ -229,7 +238,7 @@ async fn main() -> anyhow::Result<()> { processors, EngineConfig { max_concurrent_tasks: args.max_concurrent_tasks, - start_block: args.start_block, + start_block: 0, events_chunk_size: args.events_chunk_size, index_pending: args.index_pending, polling_interval: Duration::from_millis(args.polling_interval), @@ -237,17 +246,13 @@ async fn main() -> anyhow::Result<()> { }, shutdown_tx.clone(), Some(block_tx), + Arc::new(contracts), ); let shutdown_rx = shutdown_tx.subscribe(); - let (grpc_addr, grpc_server) = torii_grpc::server::new( - shutdown_rx, - &pool, - block_rx, - args.world_address, - Arc::clone(&provider), - ) - .await?; + let (grpc_addr, grpc_server) = + torii_grpc::server::new(shutdown_rx, &pool, block_rx, world_address, Arc::clone(&provider)) + .await?; let mut libp2p_relay_server = torii_relay::server::Relay::new( db, @@ -310,6 +315,26 @@ async fn main() -> anyhow::Result<()> { Ok(()) } +// Verifies that the world address is defined at most once +// and returns 
+// Verifies that the world address is defined at most once +// and returns the world address +fn verify_single_world_address( + world_address: Option<Felt>, + config: &mut ToriiConfig, +) -> anyhow::Result<Felt> { + let world_from_config = + config.contracts.iter().find(|c| c.r#type == ContractType::WORLD).map(|c| c.address); + + match (world_address, world_from_config) { + (Some(_), Some(_)) => Err(anyhow::anyhow!("World address specified multiple times")), + (Some(addr), _) => { + config.contracts.push_front(Contract { address: addr, r#type: ContractType::WORLD }); + Ok(addr) + } + (_, Some(addr)) => Ok(addr), + (None, None) => Err(anyhow::anyhow!("World address not specified")), + } +} + async fn spawn_rebuilding_graphql_server( shutdown_tx: Sender<()>, pool: Arc<SqlitePool>, @@ -333,3 +358,29 @@ } } } + +// Parses a clap CLI argument expected to be in the format: +// - erc_type:address +// - address (the contract type defaults to WORLD) +fn parse_erc_contracts(s: &str) -> anyhow::Result<Vec<Contract>> { + let parts: Vec<&str> = s.split(',').collect(); + let mut contracts = Vec::new(); + for part in parts { + match part.split(':').collect::<Vec<&str>>().as_slice() { + [r#type, address] => { + let r#type = r#type.parse::<ContractType>()?; + let address = Felt::from_str(address) + .with_context(|| format!("Expected address, found {}", address))?; + contracts.push(Contract { address, r#type }); + } + [address] => { + let r#type = ContractType::WORLD; + let address = Felt::from_str(address) + .with_context(|| format!("Expected address, found {}", address))?; + contracts.push(Contract { address, r#type }); + } + _ => return Err(anyhow::anyhow!("Invalid contract format")), + } + } + Ok(contracts) +} diff --git a/bin/torii/torii.toml b/bin/torii/torii.toml new file mode 100644 index 0000000000..93a444170f --- /dev/null +++ b/bin/torii/torii.toml @@ -0,0 +1,6 @@ +# Example configuration file for Torii +# contracts = [ +# { type = "WORLD", address = "<WORLD_ADDRESS>" }, +# { type = "ERC20", address = "<ERC20_ADDRESS>" }, +# { type = "ERC721", address = "<ERC721_ADDRESS>" }, +# ] \ No newline at end of file diff --git a/crates/torii/core/Cargo.toml b/crates/torii/core/Cargo.toml index 51e2037ce1..30040d528b 100644 --- a/crates/torii/core/Cargo.toml +++ b/crates/torii/core/Cargo.toml @@ -28,11 +28,13 @@ serde.workspace = true serde_json.workspace = true slab = "0.4.2" sqlx.workspace = true -starknet.workspace = true starknet-crypto.workspace = true +starknet.workspace = true thiserror.workspace = true tokio = { version = "1.32.0", features = [ "sync" ], default-features = true } +# tokio-stream = "0.1.11" tokio-util.workspace = true +toml.workspace = true tracing.workspace = true [dev-dependencies]
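The `toml` dependency added to `torii-core` above backs `ToriiConfig::load_from_path`, which `main()` calls when `--config` is passed. The real type lives in `torii_core::types`; a minimal stand-in that would load the example `torii.toml`, assuming it is a thin serde wrapper:

```rust
use std::collections::VecDeque;
use std::path::PathBuf;

use anyhow::Context;
use serde::Deserialize;

// Minimal stand-in for torii_core::types::ToriiConfig; the field names follow
// the example torii.toml above, everything else is an assumption.
#[derive(Default, Deserialize)]
struct ToriiConfig {
    #[serde(default)]
    contracts: VecDeque<Contract>,
}

#[derive(Deserialize)]
struct Contract {
    r#type: String, // the real field is a torii_core::types::ContractType
    address: String,
}

impl ToriiConfig {
    fn load_from_path(path: &PathBuf) -> anyhow::Result<Self> {
        let content = std::fs::read_to_string(path)
            .with_context(|| format!("reading config file at {path:?}"))?;
        toml::from_str(&content).context("parsing torii config")
    }
}
```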
diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index d35451564e..6fe09063db 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::fmt::Debug; use std::hash::{DefaultHasher, Hash, Hasher}; use std::sync::Arc; @@ -7,43 +7,115 @@ use std::time::Duration; use anyhow::Result; use bitflags::bitflags; use dojo_world::contracts::world::WorldContractReader; -use futures_util::future::try_join_all; +use futures_util::future::{join_all, try_join_all}; use hashlink::LinkedHashMap; use starknet::core::types::{ - BlockId, BlockTag, EmittedEvent, Event, EventFilter, Felt, MaybePendingBlockWithReceipts, - MaybePendingBlockWithTxHashes, PendingBlockWithReceipts, ReceiptBlock, Transaction, - TransactionReceipt, TransactionReceiptWithBlockInfo, TransactionWithReceipt, + BlockId, BlockTag, EmittedEvent, Event, EventFilter, EventsPage, MaybePendingBlockWithReceipts, + MaybePendingBlockWithTxHashes, PendingBlockWithReceipts, Transaction, TransactionReceipt, + TransactionWithReceipt, }; +use starknet::core::utils::get_selector_from_name; use starknet::providers::Provider; +use starknet_crypto::Felt; use tokio::sync::broadcast::Sender; use tokio::sync::mpsc::Sender as BoundedSender; use tokio::sync::Semaphore; +use tokio::task::JoinSet; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; +use crate::processors::erc20_legacy_transfer::Erc20LegacyTransferProcessor; +use crate::processors::erc20_transfer::Erc20TransferProcessor; +use crate::processors::erc721_legacy_transfer::Erc721LegacyTransferProcessor; +use crate::processors::erc721_transfer::Erc721TransferProcessor; use crate::processors::event_message::EventMessageProcessor; +use crate::processors::metadata_update::MetadataUpdateProcessor; +use crate::processors::register_model::RegisterModelProcessor; +use crate::processors::store_del_record::StoreDelRecordProcessor; +use crate::processors::store_set_record::StoreSetRecordProcessor; +use crate::processors::store_update_member::StoreUpdateMemberProcessor; +use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; -use crate::sql::Sql; +use crate::sql::{Cursors, Sql}; +use crate::types::ContractType; + +type EventProcessorMap
<P> = HashMap<Felt, Vec<Box<dyn EventProcessor<P>>>>; #[allow(missing_debug_implementations)] pub struct Processors<P: Provider + Send + Sync + std::fmt::Debug + 'static> { pub block: Vec<Box<dyn BlockProcessor<P>>>, pub transaction: Vec<Box<dyn TransactionProcessor<P>>>, - pub event: HashMap<Felt, Box<dyn EventProcessor<P>>>, pub catch_all_event: Box<dyn EventProcessor<P>>, + pub event_processors: HashMap<ContractType, EventProcessorMap<P>>, } impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Default for Processors<P>
{ fn default() -> Self { Self { block: vec![], - event: HashMap::new(), transaction: vec![], catch_all_event: Box::new(EventMessageProcessor) as Box<dyn EventProcessor<P>>, + event_processors: Self::initialize_event_processors(), } } } +impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Processors<P>
{ + pub fn initialize_event_processors() -> HashMap<ContractType, EventProcessorMap<P>> { + let mut event_processors_map = HashMap::<ContractType, EventProcessorMap<P>>::new(); + + let event_processors = vec![ + ( + ContractType::WORLD, + vec![ + Box::new(RegisterModelProcessor) as Box<dyn EventProcessor<P>>, + Box::new(StoreSetRecordProcessor), + Box::new(MetadataUpdateProcessor), + Box::new(StoreDelRecordProcessor), + Box::new(StoreUpdateRecordProcessor), + Box::new(StoreUpdateMemberProcessor), + ], + ), + ( + ContractType::ERC20, + vec![ + Box::new(Erc20TransferProcessor) as Box<dyn EventProcessor<P>>, + Box::new(Erc20LegacyTransferProcessor) as Box<dyn EventProcessor<P>>, + ], + ), + ( + ContractType::ERC721, + vec![ + Box::new(Erc721TransferProcessor) as Box<dyn EventProcessor<P>>, + Box::new(Erc721LegacyTransferProcessor) as Box<dyn EventProcessor<P>>, + ], + ), + ]; + + for (contract_type, processors) in event_processors { + for processor in processors { + let key = get_selector_from_name(processor.event_key().as_str()) + .expect("Event key is ASCII so this should never fail"); + event_processors_map + .entry(contract_type) + .or_default() + .entry(key) + .or_default() + .push(processor); + } + } + + event_processors_map + } + + pub fn get_event_processor( + &self, + contract_type: ContractType, + ) -> &HashMap<Felt, Vec<Box<dyn EventProcessor<P>>>> { + self.event_processors.get(&contract_type).unwrap() + } +}
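Worth spelling out why `initialize_event_processors` maps each selector to a `Vec` of processors: all four ERC processors report the same `Transfer` event key, so one selector can resolve to several candidates and the event's shape has to decide. A sketch of the dispatch this enables, using only the API defined above:

```rust
use starknet::core::types::Event;
use starknet::providers::Provider;

// Sketch: selector collision on "Transfer" resolved by validate(). Dispatch
// first narrows by contract type and selector, then lets each candidate check
// whether the event's keys/data layout matches its expectations.
fn pick_processor<'a, P: Provider + Send + Sync + std::fmt::Debug + 'static>(
    processors: &'a Processors<P>,
    contract_type: ContractType,
    event: &Event,
) -> Option<&'a dyn EventProcessor<P>> {
    let by_selector = processors.get_event_processor(contract_type);
    by_selector
        .get(&event.keys[0])? // e.g. get_selector_from_name("Transfer")
        .iter()
        .find(|p| p.validate(event))
        .map(|p| p.as_ref())
}
```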
pub(crate) const LOG_TARGET: &str = "torii_core::engine"; pub const QUERY_QUEUE_BATCH_SIZE: usize = 1000; @@ -88,6 +160,7 @@ pub enum FetchDataResult { #[derive(Debug)] pub struct FetchRangeResult { // (block_number, transaction_hash) -> events + // NOTE: the LinkedHashMap may contain blocks out of order pub transactions: LinkedHashMap<(u64, Felt), Vec<EmittedEvent>>, pub blocks: BTreeMap<u64, u64>, pub latest_block_number: u64, @@ -108,23 +181,17 @@ pub struct ParallelizedEvent { pub event: Event, } -#[derive(Debug)] -pub struct EngineHead { - pub block_number: u64, - pub last_pending_block_world_tx: Option<Felt>, - pub last_pending_block_tx: Option<Felt>, -} #[allow(missing_debug_implementations)] pub struct Engine<P: Provider + Send + Sync + std::fmt::Debug + 'static> { world: Arc<WorldContractReader<P>>, db: Sql, - provider: Box<P>
, + provider: Arc
<P>
, processors: Arc<Processors<P>>, config: EngineConfig, shutdown_tx: Sender<()>, block_tx: Option<BoundedSender<u64>>, - tasks: HashMap<u64, Vec<ParallelizedEvent>>, + tasks: HashMap<u64, Vec<(ContractType, ParallelizedEvent)>>, + contracts: Arc<HashMap<Felt, ContractType>>, } struct UnprocessedEvent { @@ -133,6 +200,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ + #[allow(clippy::too_many_arguments)] pub fn new( world: WorldContractReader
<P>
, db: Sql, @@ -141,22 +209,24 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ config: EngineConfig, shutdown_tx: Sender<()>, block_tx: Option<BoundedSender<u64>>, + contracts: Arc<HashMap<Felt, ContractType>>, ) -> Self { Self { world: Arc::new(world), db, - provider: Box::new(provider), + provider: Arc::new(provider), processors: Arc::new(processors), config, shutdown_tx, block_tx, + contracts, tasks: HashMap::new(), } } pub async fn start(&mut self) -> Result<()> { // use the start block provided by user if head is 0 - let (head, _, _) = self.db.head().await?; + let (head, _, _) = self.db.head(self.world.address).await?; if head == 0 { self.db.set_head(self.config.start_block, 0, 0, self.world.address).await?; } else if self.config.start_block != 0 { @@ -170,13 +240,12 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ let mut erroring_out = false; loop { - let (head, last_pending_block_world_tx, last_pending_block_tx) = self.db.head().await?; - + let cursors = self.db.cursors().await?; tokio::select! { _ = shutdown_rx.recv() => { break Ok(()); } - res = self.fetch_data(head, last_pending_block_world_tx, last_pending_block_tx) => { + res = self.fetch_data(&cursors) => { match res { Ok(fetch_result) => { let instant = Instant::now(); @@ -187,7 +256,10 @@ impl Engine
<P>
{ } match self.process(fetch_result).await { - Ok(_) => self.db.execute().await?, + Ok(_) => { + self.db.execute().await?; + self.db.apply_cache_diff().await?; + }, Err(e) => { error!(target: LOG_TARGET, error = %e, "Processing fetched data."); erroring_out = true; @@ -214,23 +286,19 @@ impl Engine
<P>
{ } } - pub async fn fetch_data( - &mut self, - from: u64, - last_pending_block_world_tx: Option<Felt>, - last_pending_block_tx: Option<Felt>, - ) -> Result<FetchDataResult> { - let instant = Instant::now(); + pub async fn fetch_data(&mut self, cursors: &Cursors) -> Result<FetchDataResult> { let latest_block_number = self.provider.block_hash_and_number().await?.block_number; + let from = cursors.head.unwrap_or(0); + let instant = Instant::now(); let result = if from < latest_block_number { let from = if from == 0 { from } else { from + 1 }; - let data = - self.fetch_range(from, latest_block_number, last_pending_block_world_tx).await?; + let data = self.fetch_range(from, latest_block_number, &cursors.cursor_map).await?; debug!(target: LOG_TARGET, duration = ?instant.elapsed(), from = %from, to = %latest_block_number, "Fetched data for range."); FetchDataResult::Range(data) } else if self.config.index_pending { - let data = self.fetch_pending(latest_block_number + 1, last_pending_block_tx).await?; + let data = + self.fetch_pending(latest_block_number + 1, cursors.last_pending_block_tx).await?; debug!(target: LOG_TARGET, duration = ?instant.elapsed(), latest_block_number = %latest_block_number, "Fetched pending data."); if let Some(data) = data { FetchDataResult::Pending(data)
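`fetch_data` now reads everything from a `Cursors` snapshot rather than a single world head. The struct itself is defined in `torii_core::sql` (imported above); its shape, inferred from the field accesses here, is assumed to be roughly:

```rust
// Assumed shape of torii_core::sql::Cursors, inferred from usage above.
use std::collections::HashMap;
use starknet_crypto::Felt;

pub struct Cursors {
    // contract address -> last transaction indexed for that contract
    pub cursor_map: HashMap<Felt, Felt>,
    // last transaction processed out of the *pending* block, if any
    pub last_pending_block_tx: Option<Felt>,
    // last fully indexed block number; None on a fresh database
    pub head: Option<u64>,
}
```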
@@ -248,98 +316,107 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ &mut self, from: u64, to: u64, - last_pending_block_world_tx: Option<Felt>, + cursor_map: &HashMap<Felt, Felt>, ) -> Result<FetchRangeResult> { // Process all blocks from current to latest. - let get_events = |token: Option<String>| { - self.provider.get_events( - EventFilter { - from_block: Some(BlockId::Number(from)), - to_block: Some(BlockId::Number(to)), - address: Some(self.world.address), - keys: None, - }, - token, - self.config.events_chunk_size, - ) - }; + let mut fetch_all_events_tasks = VecDeque::new(); + + for contract in self.contracts.iter() { + let events_filter = EventFilter { + from_block: Some(BlockId::Number(from)), + to_block: Some(BlockId::Number(to)), + address: Some(*contract.0), + keys: None, + }; + let token_events_pages = + get_all_events(&self.provider, events_filter, self.config.events_chunk_size); + + // Prefer processing world events first + match contract.1 { + ContractType::WORLD => fetch_all_events_tasks.push_front(token_events_pages), + _ => fetch_all_events_tasks.push_back(token_events_pages), + } + } + + let task_result = join_all(fetch_all_events_tasks).await; + + let mut events = vec![]; + + for result in task_result { + let result = result?; + let contract_address = + result.0.expect("EventFilters that we use always have an address"); + let events_pages = result.1; + let last_contract_tx = cursor_map.get(&contract_address).cloned(); + let mut last_contract_tx_tmp = last_contract_tx; + + debug!(target: LOG_TARGET, "Total events pages fetched for contract ({:#x}): {}", &contract_address, &events_pages.len()); + + for events_page in events_pages { + debug!("Processing events page with events: {}", &events_page.events.len()); + for event in events_page.events { + // Then we skip all transactions until we reach the last pending processed + // transaction (if any) + if let Some(last_contract_tx) = last_contract_tx_tmp { + if event.transaction_hash != last_contract_tx { + continue; + } + + last_contract_tx_tmp = None; + } - // handle next events pages - let mut events_pages = vec![get_events(None).await?]; + // Skip the latest pending block transaction events + // * as we might have multiple events for the same transaction + if let Some(last_contract_tx) = last_contract_tx { + if event.transaction_hash == last_contract_tx { + continue; + } + } - while let Some(token) = &events_pages.last().unwrap().continuation_token { debug!(target: LOG_TARGET, "Fetching events page with continuation token: {}", &token); - events_pages.push(get_events(Some(token.clone())).await?); + events.push(event); + } + } } - debug!(target: LOG_TARGET, "Total events pages fetched: {}", &events_pages.len()); // Transactions & blocks to process - let mut last_block = 0_u64; let mut blocks = BTreeMap::new(); // Flatten events pages and events according to the pending block cursor // to array of (block_number, transaction_hash) - let mut last_pending_block_world_tx_cursor = last_pending_block_world_tx; let mut transactions = LinkedHashMap::new(); - for events_page in events_pages { - debug!("Processing events page with events: {}", &events_page.events.len()); - for event in events_page.events { - let block_number = match event.block_number { - Some(block_number) => block_number, - // If the block number is not present, try to fetch it from the transaction - // receipt Should not/rarely happen. Thus the additional - // fetch is acceptable. 
- None => { - let TransactionReceiptWithBlockInfo { receipt, block } = - self.provider.get_transaction_receipt(event.transaction_hash).await?; - - match receipt { - TransactionReceipt::Invoke(_) | TransactionReceipt::L1Handler(_) => { - if let ReceiptBlock::Block { block_number, .. } = block { - block_number - } else { - // If the block is pending, we assume the block number is the - // latest + 1 - to + 1 - } - } - - _ => to + 1, - } - } - }; - // Keep track of last block number and fetch block timestamp - if block_number > last_block { - let block_timestamp = self.get_block_timestamp(block_number).await?; - blocks.insert(block_number, block_timestamp); + let mut block_set = HashSet::new(); + for event in events { + let block_number = match event.block_number { + Some(block_number) => block_number, + None => unreachable!("In fetch range all events should have block number"), + }; - last_block = block_number; - } + block_set.insert(block_number); - // Then we skip all transactions until we reach the last pending processed - // transaction (if any) - if let Some(tx) = last_pending_block_world_tx_cursor { - if event.transaction_hash != tx { - continue; - } + transactions + .entry((block_number, event.transaction_hash)) + .or_insert(vec![]) + .push(event); + } - last_pending_block_world_tx_cursor = None; - } + let semaphore = Arc::new(Semaphore::new(self.config.max_concurrent_tasks)); + let mut set: JoinSet<Result<(u64, u64)>> = JoinSet::new(); - // Skip the latest pending block transaction events - // * as we might have multiple events for the same transaction - if let Some(tx) = last_pending_block_world_tx { - if event.transaction_hash == tx { - continue; - } - } + for block_number in block_set { + let semaphore = semaphore.clone(); + let provider = self.provider.clone(); + set.spawn(async move { + let _permit = semaphore.acquire().await.unwrap(); + debug!("Fetching block timestamp for block number: {}", block_number); + let block_timestamp = get_block_timestamp(&provider, block_number).await?; + Ok((block_number, block_timestamp)) + }); + } - transactions - .entry((block_number, event.transaction_hash)) - .or_insert(vec![]) - .push(event); - } + while let Some(result) = set.join_next().await { + let (block_number, block_timestamp) = result??; + blocks.insert(block_number, block_timestamp); } debug!("Transactions: {}", &transactions.len());
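The hunk above replaces the serial per-event timestamp lookups with a bounded fan-out. The same `JoinSet` + `Semaphore` pattern in isolation, with a hypothetical `fetch_timestamp` standing in for the provider call:

```rust
use std::sync::Arc;

use anyhow::Result;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;

// Placeholder for provider.get_block_with_tx_hashes(..); illustration only.
async fn fetch_timestamp(block: u64) -> Result<u64> {
    Ok(block * 10)
}

// Standalone sketch of the bounded concurrency used above.
async fn fetch_all(blocks: Vec<u64>, max_concurrent: usize) -> Result<Vec<(u64, u64)>> {
    let semaphore = Arc::new(Semaphore::new(max_concurrent));
    let mut set: JoinSet<Result<(u64, u64)>> = JoinSet::new();
    for block in blocks {
        let semaphore = semaphore.clone();
        set.spawn(async move {
            // The permit is held for the whole fetch, capping in-flight requests.
            let _permit = semaphore.acquire().await?;
            Ok((block, fetch_timestamp(block).await?))
        });
    }
    let mut out = Vec::new();
    while let Some(res) = set.join_next().await {
        out.push(res??); // first ? for the JoinError, second for the task Result
    }
    Ok(out)
}
```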
@@ -371,25 +448,26 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ })) } - pub async fn process(&mut self, fetch_result: FetchDataResult) -> Result<Option<EngineHead>> { + pub async fn process(&mut self, fetch_result: FetchDataResult) -> Result<()> { match fetch_result { - FetchDataResult::Range(data) => self.process_range(data).await.map(Some), - FetchDataResult::Pending(data) => self.process_pending(data).await.map(Some), - FetchDataResult::None => Ok(None), - } + FetchDataResult::Range(data) => self.process_range(data).await?, + FetchDataResult::Pending(data) => self.process_pending(data).await?, + FetchDataResult::None => {} + }; + + Ok(()) } - pub async fn process_pending(&mut self, data: FetchPendingResult) -> Result<EngineHead> { + pub async fn process_pending(&mut self, data: FetchPendingResult) -> Result<()> { // Skip transactions that have been processed already // Our cursor is the last processed transaction let mut last_pending_block_tx_cursor = data.last_pending_block_tx; let mut last_pending_block_tx = data.last_pending_block_tx; - let mut last_pending_block_world_tx = None; let timestamp = data.pending_block.timestamp; - let mut world_txns_count = 0; + let mut cursor_map = HashMap::new(); for t in data.pending_block.transactions { let transaction_hash = t.transaction.transaction_hash(); if let Some(tx) = last_pending_block_tx_cursor { @@ -401,83 +479,35 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ continue; } - match self.process_transaction_with_receipt(&t, data.block_number, timestamp).await { - Err(e) => { - match e.to_string().as_str() { - "TransactionHashNotFound" => { - // We failed to fetch the transaction, which is because - // the transaction might not have been processed fast enough by the - // provider. So we can fail silently and try - // again in the next iteration. - warn!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Retrieving pending transaction receipt."); - self.db - .set_head( - data.block_number - 1, - timestamp, - world_txns_count, - self.world.address, - ) - .await?; - if let Some(tx) = last_pending_block_tx { - self.db.set_last_pending_block_tx(Some(tx))?; - } - - if let Some(tx) = last_pending_block_world_tx { - self.db.set_last_pending_block_world_tx(Some(tx))?; - } - return Ok(EngineHead { - block_number: data.block_number - 1, - last_pending_block_tx, - last_pending_block_world_tx, - }); - } - _ => { - error!(target: LOG_TARGET, error = %e, transaction_hash = %format!("{:#x}", transaction_hash), "Processing pending transaction."); - return Err(e); - } - } - } - Ok(true) => { - world_txns_count += 1; - last_pending_block_world_tx = Some(*transaction_hash); - last_pending_block_tx = Some(*transaction_hash); - info!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Processed pending world transaction."); - } - Ok(_) => { - last_pending_block_tx = Some(*transaction_hash); - debug!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Processed pending transaction.") - } + if let Err(e) = self + .process_transaction_with_receipt(&t, data.block_number, timestamp, &mut cursor_map) + .await + { + error!(target: LOG_TARGET, error = %e, transaction_hash = %format!("{:#x}", transaction_hash), "Processing pending transaction."); + return Err(e); } + + last_pending_block_tx = Some(*transaction_hash); + debug!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Processed pending transaction."); } // Process parallelized events self.process_tasks().await?; - // Set the head to the last processed pending transaction - // Head block number should still be latest block number - self.db - .set_head(data.block_number - 1, timestamp, world_txns_count, self.world.address) - .await?; - - if let Some(tx) = last_pending_block_tx { - self.db.set_last_pending_block_tx(Some(tx))?; - } - - if let Some(tx) = last_pending_block_world_tx { - self.db.set_last_pending_block_world_tx(Some(tx))?; - } - - Ok(EngineHead { - block_number: data.block_number - 1, - last_pending_block_world_tx, + self.db.update_cursors( + data.block_number - 1, last_pending_block_tx, - }) + cursor_map, + timestamp, + )?; + + Ok(()) } - pub async fn process_range(&mut self, data: FetchRangeResult) -> Result<EngineHead> { + pub async fn process_range(&mut self, data: FetchRangeResult) -> Result<()> { // Process all transactions - let mut last_block = 0; - let transactions_count = data.transactions.len(); + let mut processed_blocks = HashSet::new(); + let mut cursor_map = HashMap::new(); for ((block_number, transaction_hash), events) in data.transactions { debug!("Processing transaction hash: {:#x}", transaction_hash); // Process transaction @@ -493,40 +523,30 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ block_number, data.blocks[&block_number], transaction, + &mut cursor_map, ) .await?; // Process block - if block_number > last_block { + if !processed_blocks.contains(&block_number) { if let Some(ref block_tx) = self.block_tx { block_tx.send(block_number).await?; } self.process_block(block_number, data.blocks[&block_number]).await?; - last_block = block_number; + processed_blocks.insert(block_number); } } // Process parallelized events self.process_tasks().await?; - let last_block_timestamp = self.get_block_timestamp(data.latest_block_number).await?; - self.db - .set_head( - data.latest_block_number, - last_block_timestamp, - transactions_count as u64, - self.world.address, - ) - .await?; - self.db.set_last_pending_block_world_tx(None)?; - self.db.set_last_pending_block_tx(None)?; - - Ok(EngineHead { - block_number: data.latest_block_number, - last_pending_block_tx: None, - last_pending_block_world_tx: None, - }) + let last_block_timestamp = + get_block_timestamp(&self.provider, data.latest_block_number).await?; + + self.db.reset_cursors(data.latest_block_number, cursor_map, last_block_timestamp)?; + + Ok(()) } async fn process_tasks(&mut self) -> Result<()> { @@ -538,14 +558,18 @@ impl Engine
<P>
{ for (task_id, events) in self.tasks.drain() { let db = self.db.clone(); let world = self.world.clone(); - let processors = self.processors.clone(); let semaphore = semaphore.clone(); + let processors = self.processors.clone(); handles.push(tokio::spawn(async move { let _permit = semaphore.acquire().await?; let mut local_db = db.clone(); - for ParallelizedEvent { event_id, event, block_number, block_timestamp } in events { - if let Some(processor) = processors.event.get(&event.keys[0]) { + for (contract_type, ParallelizedEvent { event_id, event, block_number, block_timestamp }) in events { + let contract_processors = processors.get_event_processor(contract_type); + if let Some(processors) = contract_processors.get(&event.keys[0]) { + + let processor = processors.iter().find(|p| p.validate(&event)).expect("Must find at least one processor for the event"); + debug!(target: LOG_TARGET, event_name = processor.event_key(), task_id = %task_id, "Processing parallelized event."); if let Err(e) = processor @@ -567,13 +591,6 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ Ok(()) } - async fn get_block_timestamp(&self, block_number: u64) -> Result<u64> { - match self.provider.get_block_with_tx_hashes(BlockId::Number(block_number)).await? { - MaybePendingBlockWithTxHashes::Block(block) => Ok(block.timestamp), - MaybePendingBlockWithTxHashes::PendingBlock(block) => Ok(block.timestamp), - } - } async fn process_transaction_with_events( &mut self, transaction_hash: Felt, @@ -581,7 +598,10 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ block_number: u64, block_timestamp: u64, transaction: Option<Transaction>, + cursor_map: &mut HashMap<Felt, (Felt, u64)>, ) -> Result<()> { + let mut unique_contracts = HashSet::new(); + // Contract -> Cursor for (event_idx, event) in events.iter().enumerate() { let event_id = format!("{:#064x}:{:#x}:{:#04x}", block_number, transaction_hash, event_idx); @@ -591,6 +611,13 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ keys: event.keys.clone(), data: event.data.clone(), }; + + let Some(&contract_type) = self.contracts.get(&event.from_address) else { + continue; + }; + + unique_contracts.insert(event.from_address); + Self::process_event( self, block_number, @@ -598,10 +625,16 @@ impl Engine
<P>
{ &event_id, &event, transaction_hash, + contract_type, ) .await?; } + for contract in unique_contracts { + let entry = cursor_map.entry(contract).or_insert((transaction_hash, 0)); + entry.1 += 1; + } + if let Some(ref transaction) = transaction { Self::process_transaction( self, @@ -623,7 +656,8 @@ impl Engine
<P>
{ transaction_with_receipt: &TransactionWithReceipt, block_number: u64, block_timestamp: u64, - ) -> Result<bool> { + cursor_map: &mut HashMap<Felt, (Felt, u64)>, + ) -> Result<()> { let transaction_hash = transaction_with_receipt.transaction.transaction_hash(); let events = match &transaction_with_receipt.receipt { TransactionReceipt::Invoke(receipt) => Some(&receipt.events), @@ -631,14 +665,15 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ _ => None, }; - let mut world_event = false; + let mut unique_contracts = HashSet::new(); if let Some(events) = events { for (event_idx, event) in events.iter().enumerate() { - if event.from_address != self.world.address { + let Some(&contract_type) = self.contracts.get(&event.from_address) else { continue; - } + }; + + unique_contracts.insert(event.from_address); - world_event = true; let event_id = format!("{:#064x}:{:#x}:{:#04x}", block_number, *transaction_hash, event_idx); @@ -649,11 +684,12 @@ impl Engine
<P>
{ &event_id, event, *transaction_hash, + contract_type, ) .await?; } - if world_event && self.config.flags.contains(IndexingFlags::TRANSACTIONS) { + if self.config.flags.contains(IndexingFlags::TRANSACTIONS) { Self::process_transaction( self, block_number, @@ -665,7 +701,12 @@ impl Engine
<P>
{ } } - Ok(world_event) + for contract in unique_contracts { + let entry = cursor_map.entry(contract).or_insert((*transaction_hash, 0)); + entry.1 += 1; + } + + Ok(()) } async fn process_block(&mut self, block_number: u64, block_timestamp: u64) -> Result<()> { @@ -709,14 +750,23 @@ impl Engine
<P>
{ event_id: &str, event: &Event, transaction_hash: Felt, + contract_type: ContractType, ) -> Result<()> { if self.config.flags.contains(IndexingFlags::RAW_EVENTS) { - self.db.store_event(event_id, event, transaction_hash, block_timestamp)?; + match contract_type { + ContractType::WORLD => { + self.db.store_event(event_id, event, transaction_hash, block_timestamp)?; + } + // ERC events need to be processed inside their respective processors; + // transfer events for ERC contracts are stored there regardless of this flag + ContractType::ERC20 | ContractType::ERC721 => {} + } } let event_key = event.keys[0]; - let Some(processor) = self.processors.event.get(&event_key) else { + let processors = self.processors.get_event_processor(contract_type); + let Some(processors) = processors.get(&event_key) else { // if we don't have a processor for this event, we try the catch all processor if self.processors.catch_all_event.validate(event) { if let Err(e) = self @@ -751,6 +801,11 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ return Ok(()); }; + let processor = processors + .iter() + .find(|p| p.validate(event)) + .expect("Must find at least one processor for the event"); + let task_identifier = match processor.event_key().as_str() { "StoreSetRecord" | "StoreUpdateRecord" | "StoreUpdateMember" | "StoreDelRecord" => { let mut hasher = DefaultHasher::new(); @@ -763,22 +818,77 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P>
{ // if we have a task identifier, we queue the event to be parallelized if task_identifier != 0 { - self.tasks.entry(task_identifier).or_default().push(ParallelizedEvent { - event_id: event_id.to_string(), - event: event.clone(), - block_number, - block_timestamp, - }); + self.tasks.entry(task_identifier).or_default().push(( + contract_type, + ParallelizedEvent { + event_id: event_id.to_string(), + event: event.clone(), + block_number, + block_timestamp, + }, + )); } else { // if we don't have a task identifier, we process the event immediately - if let Err(e) = processor - .process(&self.world, &mut self.db, block_number, block_timestamp, event_id, event) - .await - { - error!(target: LOG_TARGET, event_name = processor.event_key(), error = %e, "Processing event."); + if processor.validate(event) { + if let Err(e) = processor + .process( + &self.world, + &mut self.db, + block_number, + block_timestamp, + event_id, + event, + ) + .await + { + error!(target: LOG_TARGET, event_name = processor.event_key(), error = ?e, "Processing event."); + } + } else { + warn!(target: LOG_TARGET, event_name = processor.event_key(), "Event not validated."); } } Ok(()) } }
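For context on `task_identifier` above: store events hash to a task id so that events touching the same entity stay ordered on a single task, while unrelated entities fan out across `process_tasks`. The exact fields hashed are elided by the hunk, so this sketch hashes the event keys purely for illustration:

```rust
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};

use starknet_crypto::Felt;

// Illustration only: derive a bucket id from an event's keys so identical
// entities collide into the same bucket and are processed in order.
fn task_id_for(event_keys: &[Felt]) -> u64 {
    let mut hasher = DefaultHasher::new();
    event_keys.hash(&mut hasher);
    hasher.finish()
}

fn bucket(events: Vec<Vec<Felt>>) -> HashMap<u64, Vec<Vec<Felt>>> {
    let mut tasks: HashMap<u64, Vec<Vec<Felt>>> = HashMap::new();
    for keys in events {
        tasks.entry(task_id_for(&keys)).or_default().push(keys);
    }
    tasks
}
```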
+async fn get_all_events<P>
( + provider: &P, + events_filter: EventFilter, + events_chunk_size: u64, +) -> Result<(Option<Felt>, Vec<EventsPage>)> +where + P: Provider + Sync, +{ + let mut events_pages = Vec::new(); + let mut continuation_token = None; + + loop { + debug!( + "Fetching events page with continuation token: {:?}, for contract: {:?}", + continuation_token, events_filter.address + ); + let events_page = provider + .get_events(events_filter.clone(), continuation_token.clone(), events_chunk_size) + .await?; + + continuation_token = events_page.continuation_token.clone(); + events_pages.push(events_page); + + if continuation_token.is_none() { + break; + } + } + + Ok((events_filter.address, events_pages)) +}
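A hedged call-site sketch for `get_all_events`, mirroring how `fetch_range` builds one filter per indexed contract above (the chunk size here is arbitrary; torii takes it from `--events-chunk-size`):

```rust
use anyhow::Result;
use starknet::core::types::{BlockId, EventFilter, Felt};
use starknet::providers::Provider;

// Counts events emitted by one contract over a block range.
async fn count_events<P: Provider + Sync>(
    provider: &P,
    contract: Felt,
    from: u64,
    to: u64,
) -> Result<usize> {
    let filter = EventFilter {
        from_block: Some(BlockId::Number(from)),
        to_block: Some(BlockId::Number(to)),
        address: Some(contract),
        keys: None,
    };
    // Pages are drained until the continuation token runs out.
    let (_address, pages) = get_all_events(provider, filter, 1024).await?;
    Ok(pages.iter().map(|p| p.events.len()).sum())
}
```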
+async fn get_block_timestamp<P>
(provider: &P, block_number: u64) -> Result<u64> +where + P: Provider + Sync, +{ + match provider.get_block_with_tx_hashes(BlockId::Number(block_number)).await? { + MaybePendingBlockWithTxHashes::Block(block) => Ok(block.timestamp), + MaybePendingBlockWithTxHashes::PendingBlock(block) => Ok(block.timestamp), + } +} diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index a1e5bf2e43..04d64676b9 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -1,11 +1,13 @@ +use std::collections::HashMap; use std::mem; +use std::str::FromStr; use anyhow::{Context, Result}; use dojo_types::schema::{Struct, Ty}; use sqlx::query::Query; use sqlx::sqlite::SqliteArguments; use sqlx::{FromRow, Pool, Sqlite, Transaction}; -use starknet::core::types::Felt; +use starknet::core::types::{Felt, U256}; use tokio::sync::broadcast::{Receiver, Sender}; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio::sync::oneshot; @@ -13,8 +15,10 @@ use tokio::time::Instant; use tracing::{debug, error}; use crate::simple_broker::SimpleBroker; +use crate::sql::utils::{felt_to_sql_string, sql_string_to_u256, u256_to_sql_string, I256}; +use crate::sql::FELT_DELIMITER; use crate::types::{ - Contract as ContractUpdated, Entity as EntityUpdated, Event as EventEmitted, + ContractCursor, ContractType, Entity as EntityUpdated, Event as EventEmitted, EventMessage as EventMessageUpdated, Model as ModelRegistered, OptimisticEntity, OptimisticEventMessage, }; @@ -32,7 +36,7 @@ pub enum Argument { #[derive(Debug, Clone)] pub enum BrokerMessage { - SetHead(ContractUpdated), + SetHead(ContractCursor), ModelRegistered(ModelRegistered), EntityUpdated(EntityUpdated), EventMessageUpdated(EventMessageUpdated), @@ -47,6 +51,11 @@ pub struct DeleteEntityQuery { pub ty: Ty, } +#[derive(Debug, Clone)] +pub struct ApplyBalanceDiffQuery { + pub erc_cache: HashMap<(ContractType, String), I256>, +} + #[derive(Debug, Clone)] pub struct SetHeadQuery { pub head: u64, @@ -55,12 +64,32 @@ pub struct SetHeadQuery { pub contract_address: Felt, } +#[derive(Debug, Clone)] +pub struct ResetCursorsQuery { + // contract => (last_txn, txn_count) + pub cursor_map: HashMap<Felt, (Felt, u64)>, + pub last_block_timestamp: u64, + pub last_block_number: u64, +} + +#[derive(Debug, Clone)] +pub struct UpdateCursorsQuery { + // contract => (last_txn, txn_count) + pub cursor_map: HashMap<Felt, (Felt, u64)>, + pub last_block_number: u64, + pub last_pending_block_tx: Option<Felt>, + pub pending_block_timestamp: u64, +} + #[derive(Debug, Clone)] pub enum QueryType { SetHead(SetHeadQuery), + ResetCursors(ResetCursorsQuery), + UpdateCursors(UpdateCursorsQuery), SetEntity(Ty), DeleteEntity(DeleteEntityQuery), EventMessage(Ty), + ApplyBalanceDiff(ApplyBalanceDiffQuery), RegisterModel, StoreEvent, Execute, @@ -69,6 +98,8 @@ pub enum QueryType { #[derive(Debug)] pub struct Executor<'c> { + // Queries should use `transaction` instead of `pool` + // This `pool` is only used to create a new `transaction` pool: Pool<Sqlite>, transaction: Transaction<'c, Sqlite>, publish_queue: Vec<BrokerMessage>, @@ -215,9 +246,116 @@ impl<'c> Executor<'c> { .fetch_one(&mut **tx) .await?; - let contract = ContractUpdated::from_row(&row)?; + let contract = ContractCursor::from_row(&row)?; self.publish_queue.push(BrokerMessage::SetHead(contract)); } + QueryType::ResetCursors(reset_heads) => { + // Read all cursors from db + let mut cursors: Vec<ContractCursor> = + sqlx::query_as("SELECT * FROM contracts").fetch_all(&mut **tx).await?; + + let new_head =
reset_heads.last_block_number.try_into().expect("doesn't fit in i64"); + let new_timestamp = reset_heads.last_block_timestamp; + + for cursor in &mut cursors { + if let Some(new_cursor) = reset_heads + .cursor_map + .get(&Felt::from_str(&cursor.contract_address).unwrap()) + { + let cursor_timestamp: u64 = + cursor.last_block_timestamp.try_into().expect("doesn't fit in u64"); + + let new_tps = if new_timestamp - cursor_timestamp != 0 { + new_cursor.1 / (new_timestamp - cursor_timestamp) + } else { + new_cursor.1 + }; + + cursor.tps = new_tps.try_into().expect("doesn't fit in i64"); + } else { + cursor.tps = 0; + } + + cursor.head = new_head; + cursor.last_block_timestamp = + new_timestamp.try_into().expect("doesn't fit in i64"); + cursor.last_pending_block_tx = None; + cursor.last_pending_block_contract_tx = None; + + sqlx::query( + "UPDATE contracts SET head = ?, last_block_timestamp = ?, \ + last_pending_block_tx = ?, last_pending_block_contract_tx = ? WHERE id = \ + ?", + ) + .bind(cursor.head) + .bind(cursor.last_block_timestamp) + .bind(&cursor.last_pending_block_tx) + .bind(&cursor.last_pending_block_contract_tx) + .bind(&cursor.contract_address) + .execute(&mut **tx) + .await?; + + // Send appropriate ContractUpdated publish message + self.publish_queue.push(BrokerMessage::SetHead(cursor.clone())); + } + } + QueryType::UpdateCursors(update_cursors) => { + // Read all cursors from db + let mut cursors: Vec<ContractCursor> = + sqlx::query_as("SELECT * FROM contracts").fetch_all(&mut **tx).await?; + + let new_head = + update_cursors.last_block_number.try_into().expect("doesn't fit in i64"); + let new_timestamp = update_cursors.pending_block_timestamp; + + for cursor in &mut cursors { + if let Some(new_cursor) = update_cursors + .cursor_map + .get(&Felt::from_str(&cursor.contract_address).unwrap()) + { + let cursor_timestamp: u64 = + cursor.last_block_timestamp.try_into().expect("doesn't fit in u64"); + + let num_transactions = new_cursor.1; + + let new_tps = if new_timestamp - cursor_timestamp != 0 { + num_transactions / (new_timestamp - cursor_timestamp) + } else { + num_transactions + }; + + cursor.last_pending_block_contract_tx = + Some(felt_to_sql_string(&new_cursor.0)); + cursor.tps = new_tps.try_into().expect("doesn't fit in i64"); + } else { + cursor.tps = 0; + } + cursor.last_block_timestamp = update_cursors + .pending_block_timestamp + .try_into() + .expect("doesn't fit in i64"); + cursor.head = new_head; + cursor.last_pending_block_tx = + update_cursors.last_pending_block_tx.map(|felt| felt_to_sql_string(&felt)); + + sqlx::query( + "UPDATE contracts SET head = ?, last_block_timestamp = ?, \ + last_pending_block_tx = ?, last_pending_block_contract_tx = ? WHERE id = \ + ?", + ) + .bind(cursor.head) + .bind(cursor.last_block_timestamp) + .bind(&cursor.last_pending_block_tx) + .bind(&cursor.last_pending_block_contract_tx) + .bind(&cursor.contract_address) + .execute(&mut **tx) + .await?; + + // Send appropriate ContractUpdated publish message + self.publish_queue.push(BrokerMessage::SetHead(cursor.clone())); + } + }
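Both cursor arms above read and write the same rows, which pins down the shape of the `contracts` table they rely on. This is a reconstruction from the queries alone, not the actual migration, so treat names and column types as assumptions:

```rust
// Inferred from the SELECT/UPDATE statements above; the real schema lives in
// torii's migrations and may carry more columns.
const CONTRACTS_TABLE_SKETCH: &str = "
CREATE TABLE IF NOT EXISTS contracts (
    id TEXT PRIMARY KEY,                   -- bound from cursor.contract_address
    contract_address TEXT NOT NULL,
    head BIGINT,                           -- last indexed block number
    tps BIGINT,                            -- txn count / elapsed seconds
    last_block_timestamp BIGINT,
    last_pending_block_tx TEXT,
    last_pending_block_contract_tx TEXT
);
";
```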
QueryType::SetEntity(entity) => { let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; @@ -327,6 +465,12 @@ impl<'c> Executor<'c> { let event = EventEmitted::from_row(&row)?; self.publish_queue.push(BrokerMessage::EventEmitted(event)); } + QueryType::ApplyBalanceDiff(apply_balance_diff) => { + debug!(target: LOG_TARGET, "Applying balance diff."); + let instant = Instant::now(); + self.apply_balance_diff(apply_balance_diff).await?; + debug!(target: LOG_TARGET, duration = ?instant.elapsed(), "Applied balance diff."); + } QueryType::Execute => { debug!(target: LOG_TARGET, "Executing query."); let instant = Instant::now(); @@ -361,6 +505,102 @@ impl<'c> Executor<'c> { Ok(()) } + + async fn apply_balance_diff( + &mut self, + apply_balance_diff: ApplyBalanceDiffQuery, + ) -> Result<()> { + let erc_cache = apply_balance_diff.erc_cache; + for ((contract_type, id_str), balance) in erc_cache.iter() { + let id = id_str.split(FELT_DELIMITER).collect::<Vec<&str>>(); + match contract_type { + ContractType::WORLD => unreachable!(), + ContractType::ERC721 => { + // account_address/contract_address:id => ERC721 + assert!(id.len() == 2); + let account_address = id[0]; + let token_id = id[1]; + let mid = token_id.split(":").collect::<Vec<&str>>(); + let contract_address = mid[0]; + + self.apply_balance_diff_helper( + id_str, + account_address, + contract_address, + token_id, + balance, + ) + .await + .with_context(|| "Failed to apply balance diff in apply_cache_diff")?; + } + ContractType::ERC20 => { + // account_address/contract_address/ => ERC20 + assert!(id.len() == 3); + let account_address = id[0]; + let contract_address = id[1]; + let token_id = id[1]; + + self.apply_balance_diff_helper( + id_str, + account_address, + contract_address, + token_id, + balance, + ) + .await + .with_context(|| "Failed to apply balance diff in apply_cache_diff")?; + } + } + } + + Ok(()) + } + + async fn apply_balance_diff_helper( + &mut self, + id: &str, + account_address: &str, + contract_address: &str, + token_id: &str, + balance_diff: &I256, + ) -> Result<()> { + let tx = &mut self.transaction; + let balance: Option<(String,)> = + sqlx::query_as("SELECT balance FROM balances WHERE id = ?") + .bind(id) + .fetch_optional(&mut **tx) + .await?; + + let mut balance = if let Some(balance) = balance { + sql_string_to_u256(&balance.0) + } else { + U256::from(0u8) + }; + + if balance_diff.is_negative { + if balance < balance_diff.value { + dbg!(&balance_diff, balance, id); + } + balance -= balance_diff.value; + } else { + balance += balance_diff.value; + } + + // write the new balance to the database + sqlx::query( + "INSERT OR REPLACE INTO balances (id, contract_address, account_address, token_id, \ + balance) VALUES (?, ?, ?, ?, ?)", + ) + .bind(id) + .bind(contract_address) + .bind(account_address) + .bind(token_id) + .bind(u256_to_sql_string(&balance)) + .execute(&mut **tx) + .await?; + + Ok(()) + } } fn send_broker_message(message: BrokerMessage) {
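`apply_balance_diff` folds signed deltas into unsigned `U256` balances. `I256` comes from `crate::sql::utils` (imported at the top of this file); its assumed sign-magnitude layout and the fold, mirroring the branch above:

```rust
use starknet::core::types::U256;

// Assumed layout of torii_core::sql::utils::I256 (sign-magnitude).
#[derive(Debug, Clone, Copy, Default)]
pub struct I256 {
    pub value: U256,
    pub is_negative: bool,
}

// Mirrors the executor branch above. An underflow on the negative path means a
// transfer out was indexed before the matching mint or transfer in, which is
// what the dbg! call above is trying to surface.
fn apply_diff(mut balance: U256, diff: &I256) -> U256 {
    if diff.is_negative {
        balance -= diff.value;
    } else {
        balance += diff.value;
    }
    balance
}
```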
diff --git a/crates/torii/core/src/lib.rs b/crates/torii/core/src/lib.rs index 888726d903..0615f98b4e 100644 --- a/crates/torii/core/src/lib.rs +++ b/crates/torii/core/src/lib.rs @@ -1,6 +1,5 @@ #![warn(unused_crate_dependencies)] -pub mod cache; pub mod engine; pub mod error; pub mod executor; diff --git a/crates/torii/core/src/processors/erc20_legacy_transfer.rs b/crates/torii/core/src/processors/erc20_legacy_transfer.rs new file mode 100644 index 0000000000..4cef0dc19d --- /dev/null +++ b/crates/torii/core/src/processors/erc20_legacy_transfer.rs @@ -0,0 +1,59 @@ +use anyhow::Error; +use async_trait::async_trait; +use cainome::cairo_serde::{CairoSerde, U256 as U256Cainome}; +use dojo_world::contracts::world::WorldContractReader; +use starknet::core::types::{Event, U256}; +use starknet::providers::Provider; +use tracing::debug; + +use super::EventProcessor; +use crate::sql::Sql; + +pub(crate) const LOG_TARGET: &str = "torii_core::processors::erc20_legacy_transfer"; + +#[derive(Default, Debug)] +pub struct Erc20LegacyTransferProcessor; + +#[async_trait] +impl
<P>
EventProcessor
<P>
for Erc20LegacyTransferProcessor +where + P: Provider + Send + Sync + std::fmt::Debug, +{ + fn event_key(&self) -> String { + "Transfer".to_string() + } + + fn validate(&self, event: &Event) -> bool { + // ref: https://github.com/OpenZeppelin/cairo-contracts/blob/1f9359219a92cdb1576f953db71ee993b8ef5f70/src/openzeppelin/token/erc20/library.cairo#L19-L21 + // key: [hash(Transfer)] + // data: [from, to, value.0, value.1] + if event.keys.len() == 1 && event.data.len() == 4 { + return true; + } + + false + } + + async fn process( + &self, + world: &WorldContractReader
<P>
, + db: &mut Sql, + _block_number: u64, + block_timestamp: u64, + _event_id: &str, + event: &Event, + ) -> Result<(), Error> { + let token_address = event.from_address; + let from = event.data[0]; + let to = event.data[1]; + + let value = U256Cainome::cairo_deserialize(&event.data, 2)?; + let value = U256::from_words(value.low, value.high); + + db.handle_erc20_transfer(token_address, from, to, value, world.provider(), block_timestamp) + .await?; + debug!(target: LOG_TARGET,from = ?from, to = ?to, value = ?value, "Legacy ERC20 Transfer"); + + Ok(()) + } +} diff --git a/crates/torii/core/src/processors/erc20_transfer.rs b/crates/torii/core/src/processors/erc20_transfer.rs new file mode 100644 index 0000000000..10022d9eb0 --- /dev/null +++ b/crates/torii/core/src/processors/erc20_transfer.rs @@ -0,0 +1,59 @@ +use anyhow::Error; +use async_trait::async_trait; +use cainome::cairo_serde::{CairoSerde, U256 as U256Cainome}; +use dojo_world::contracts::world::WorldContractReader; +use starknet::core::types::{Event, U256}; +use starknet::providers::Provider; +use tracing::debug; + +use super::EventProcessor; +use crate::sql::Sql; + +pub(crate) const LOG_TARGET: &str = "torii_core::processors::erc20_transfer"; + +#[derive(Default, Debug)] +pub struct Erc20TransferProcessor; + +#[async_trait] +impl
<P>
EventProcessor
<P>
for Erc20TransferProcessor +where + P: Provider + Send + Sync + std::fmt::Debug, +{ + fn event_key(&self) -> String { + "Transfer".to_string() + } + + fn validate(&self, event: &Event) -> bool { + // ref: https://github.com/OpenZeppelin/cairo-contracts/blob/ba00ce76a93dcf25c081ab2698da20690b5a1cfb/packages/token/src/erc20/erc20.cairo#L38-L46 + // key: [hash(Transfer), from, to] + // data: [value.0, value.1] + if event.keys.len() == 3 && event.data.len() == 2 { + return true; + } + + false + } + + async fn process( + &self, + world: &WorldContractReader
<P>
, + db: &mut Sql, + _block_number: u64, + block_timestamp: u64, + _event_id: &str, + event: &Event, + ) -> Result<(), Error> { + let token_address = event.from_address; + let from = event.keys[1]; + let to = event.keys[2]; + + let value = U256Cainome::cairo_deserialize(&event.data, 0)?; + let value = U256::from_words(value.low, value.high); + + db.handle_erc20_transfer(token_address, from, to, value, world.provider(), block_timestamp) + .await?; + debug!(target: LOG_TARGET,from = ?from, to = ?to, value = ?value, "ERC20 Transfer"); + + Ok(()) + } +} diff --git a/crates/torii/core/src/processors/erc721_legacy_transfer.rs b/crates/torii/core/src/processors/erc721_legacy_transfer.rs new file mode 100644 index 0000000000..89a88f04a3 --- /dev/null +++ b/crates/torii/core/src/processors/erc721_legacy_transfer.rs @@ -0,0 +1,66 @@ +use anyhow::Error; +use async_trait::async_trait; +use cainome::cairo_serde::{CairoSerde, U256 as U256Cainome}; +use dojo_world::contracts::world::WorldContractReader; +use starknet::core::types::{Event, U256}; +use starknet::providers::Provider; +use tracing::debug; + +use super::EventProcessor; +use crate::sql::Sql; + +pub(crate) const LOG_TARGET: &str = "torii_core::processors::erc721_legacy_transfer"; + +#[derive(Default, Debug)] +pub struct Erc721LegacyTransferProcessor; + +#[async_trait] +impl
<P>
EventProcessor
<P>
for Erc721LegacyTransferProcessor +where + P: Provider + Send + Sync + std::fmt::Debug, +{ + fn event_key(&self) -> String { + "Transfer".to_string() + } + + fn validate(&self, event: &Event) -> bool { + // ref: https://github.com/OpenZeppelin/cairo-contracts/blob/1f9359219a92cdb1576f953db71ee993b8ef5f70/src/openzeppelin/token/erc721/library.cairo#L27-L29 + // key: [hash(Transfer)] + // data: [from, to, token_id.0, token_id.1] + if event.keys.len() == 1 && event.data.len() == 4 { + return true; + } + + false + } + + async fn process( + &self, + world: &WorldContractReader
<P>
, + db: &mut Sql, + _block_number: u64, + block_timestamp: u64, + _event_id: &str, + event: &Event, + ) -> Result<(), Error> { + let token_address = event.from_address; + let from = event.data[0]; + let to = event.data[1]; + + let token_id = U256Cainome::cairo_deserialize(&event.data, 2)?; + let token_id = U256::from_words(token_id.low, token_id.high); + + db.handle_erc721_transfer( + token_address, + from, + to, + token_id, + world.provider(), + block_timestamp, + ) + .await?; + debug!(target: LOG_TARGET, from = ?from, to = ?to, token_id = ?token_id, "ERC721 Transfer"); + + Ok(()) + } +} diff --git a/crates/torii/core/src/processors/erc721_transfer.rs b/crates/torii/core/src/processors/erc721_transfer.rs new file mode 100644 index 0000000000..319ea81833 --- /dev/null +++ b/crates/torii/core/src/processors/erc721_transfer.rs @@ -0,0 +1,66 @@ +use anyhow::Error; +use async_trait::async_trait; +use cainome::cairo_serde::{CairoSerde, U256 as U256Cainome}; +use dojo_world::contracts::world::WorldContractReader; +use starknet::core::types::{Event, U256}; +use starknet::providers::Provider; +use tracing::debug; + +use super::EventProcessor; +use crate::sql::Sql; + +pub(crate) const LOG_TARGET: &str = "torii_core::processors::erc721_transfer"; + +#[derive(Default, Debug)] +pub struct Erc721TransferProcessor; + +#[async_trait] +impl
<P>
EventProcessor
<P>
for Erc721TransferProcessor +where + P: Provider + Send + Sync + std::fmt::Debug, +{ + fn event_key(&self) -> String { + "Transfer".to_string() + } + + fn validate(&self, event: &Event) -> bool { + // ref: https://github.com/OpenZeppelin/cairo-contracts/blob/ba00ce76a93dcf25c081ab2698da20690b5a1cfb/packages/token/src/erc721/erc721.cairo#L40-L49 + // key: [hash(Transfer), from, to, token_id.low, token_id.high] + // data: [] + if event.keys.len() == 5 && event.data.is_empty() { + return true; + } + + false + } + + async fn process( + &self, + world: &WorldContractReader
<P>
, + db: &mut Sql, + _block_number: u64, + block_timestamp: u64, + _event_id: &str, + event: &Event, + ) -> Result<(), Error> { + let token_address = event.from_address; + let from = event.keys[1]; + let to = event.keys[2]; + + let token_id = U256Cainome::cairo_deserialize(&event.keys, 3)?; + let token_id = U256::from_words(token_id.low, token_id.high); + + db.handle_erc721_transfer( + token_address, + from, + to, + token_id, + world.provider(), + block_timestamp, + ) + .await?; + debug!(target: LOG_TARGET, from = ?from, to = ?to, token_id = ?token_id, "ERC721 Transfer"); + + Ok(()) + } +} diff --git a/crates/torii/core/src/processors/mod.rs b/crates/torii/core/src/processors/mod.rs index c6a8f13af5..cf25f36ca6 100644 --- a/crates/torii/core/src/processors/mod.rs +++ b/crates/torii/core/src/processors/mod.rs @@ -1,15 +1,15 @@ -use std::collections::HashMap; -use std::sync::Arc; - use anyhow::{Error, Result}; use async_trait::async_trait; use dojo_world::contracts::world::WorldContractReader; use starknet::core::types::{Event, Felt, Transaction}; -use starknet::core::utils::get_selector_from_name; use starknet::providers::Provider; use crate::sql::Sql; +pub mod erc20_legacy_transfer; +pub mod erc20_transfer; +pub mod erc721_legacy_transfer; +pub mod erc721_transfer; pub mod event_message; pub mod metadata_update; pub mod register_model; @@ -73,17 +73,3 @@ pub trait TransactionProcessor<P: Provider + Sync + Send>: Send + Sync { transaction: &Transaction, ) -> Result<(), Error>; } - -/// Given a list of event processors, generate a map of event keys to the event processor -pub fn generate_event_processors_map<P: Provider + Sync + Send>( - event_processor: Vec<Arc<dyn EventProcessor<P>>>, -) -> Result<HashMap<Felt, Arc<dyn EventProcessor<P>>>> { - let mut event_processors = HashMap::new(); - - for processor in event_processor { - let key = get_selector_from_name(processor.event_key().as_str())?; - event_processors.insert(key, processor); - } - - Ok(event_processors) -} diff --git a/crates/torii/core/src/processors/store_set_record.rs b/crates/torii/core/src/processors/store_set_record.rs index c5f70a2a54..fa1351b156 100644 --- a/crates/torii/core/src/processors/store_set_record.rs +++ b/crates/torii/core/src/processors/store_set_record.rs @@ -8,7 +8,8 @@ use tracing::info; use super::EventProcessor; use crate::processors::{ENTITY_ID_INDEX, MODEL_INDEX, NUM_KEYS_INDEX}; -use crate::sql::{felts_sql_string, Sql}; +use crate::sql::utils::felts_to_sql_string; +use crate::sql::Sql; pub(crate) const LOG_TARGET: &str = "torii_core::processors::store_set_record"; @@ -60,7 +61,7 @@ where let keys_end: usize = keys_start + event.data[NUM_KEYS_INDEX].to_usize().context("invalid usize")?; let keys = event.data[keys_start..keys_end].to_vec(); - let keys_str = felts_sql_string(&keys); + let keys_str = felts_to_sql_string(&keys); // keys_end is already the length of the values array.
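Editor's aside: the four transfer processors above dispatch purely on event arity. A condensed sketch of the matching logic (illustrative only, not code from this diff) makes the shapes easier to compare. Note the two legacy layouts collide, which is why the indexer also relies on the ContractType registered for the emitting address:

```rust
use starknet::core::types::Event;

// Condensed view of the four `validate` checks in this diff (sketch only).
fn classify_transfer(event: &Event) -> Option<&'static str> {
    match (event.keys.len(), event.data.len()) {
        // keys: [hash(Transfer)], data: [from, to, value_or_id.low, value_or_id.high]
        // -> legacy ERC20 and legacy ERC721 share this arity; the indexer
        //    disambiguates via the contract type configured for the emitter.
        (1, 4) => Some("legacy ERC20/ERC721 Transfer"),
        // keys: [hash(Transfer), from, to], data: [value.low, value.high]
        (3, 2) => Some("ERC20 Transfer"),
        // keys: [hash(Transfer), from, to, token_id.low, token_id.high], data: []
        (5, 0) => Some("ERC721 Transfer"),
        _ => None,
    }
}
```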
diff --git a/crates/torii/core/src/cache.rs b/crates/torii/core/src/sql/cache.rs similarity index 66% rename from crates/torii/core/src/cache.rs rename to crates/torii/core/src/sql/cache.rs index f5afab2103..8cbcba36ed 100644 --- a/crates/torii/core/src/cache.rs +++ b/crates/torii/core/src/sql/cache.rs @@ -1,13 +1,15 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use dojo_types::schema::Ty; use dojo_world::contracts::abi::model::Layout; -use sqlx::SqlitePool; +use sqlx::{Pool, Sqlite, SqlitePool}; use starknet_crypto::Felt; use tokio::sync::RwLock; use crate::error::{Error, ParseError, QueryError}; use crate::model::{parse_sql_model_members, SqlModelMember}; +use crate::sql::utils::I256; +use crate::types::ContractType; #[derive(Debug, Clone)] pub struct Model { @@ -30,12 +32,12 @@ pub struct Model { #[derive(Debug)] pub struct ModelCache { pool: SqlitePool, - cache: RwLock<HashMap<Felt, Model>>, + model_cache: RwLock<HashMap<Felt, Model>>, } impl ModelCache { pub fn new(pool: SqlitePool) -> Self { - Self { pool, cache: RwLock::new(HashMap::new()) } + Self { pool, model_cache: RwLock::new(HashMap::new()) } } pub async fn models(&self, selectors: &[Felt]) -> Result<Vec<Model>, Error> { @@ -49,7 +51,7 @@ impl ModelCache { pub async fn model(&self, selector: &Felt) -> Result<Model, Error> { { - let cache = self.cache.read().await; + let cache = self.model_cache.read().await; if let Some(model) = cache.get(selector).cloned() { return Ok(model); } @@ -95,7 +97,7 @@ impl ModelCache { } let schema = parse_sql_model_members(&namespace, &name, &model_members); - let mut cache = self.cache.write().await; + let mut cache = self.model_cache.write().await; let model = Model { namespace, @@ -114,11 +116,49 @@ impl ModelCache { } pub async fn set(&self, selector: Felt, model: Model) { - let mut cache = self.cache.write().await; + let mut cache = self.model_cache.write().await; cache.insert(selector, model); } pub async fn clear(&self) { - self.cache.write().await.clear(); + self.model_cache.write().await.clear(); + } +} + +#[derive(Debug)] +pub struct LocalCache { + pub erc_cache: HashMap<(ContractType, String), I256>, + pub token_id_registry: HashSet<String>, +} + +impl Clone for LocalCache { + fn clone(&self) -> Self { + Self { erc_cache: HashMap::new(), token_id_registry: HashSet::new() } + } +} + +impl LocalCache { + pub async fn new(pool: Pool<Sqlite>) -> Self { + // read existing token_id's from balances table and cache them + let token_id_registry: Vec<(String,)> = sqlx::query_as("SELECT token_id FROM balances") + .fetch_all(&pool) + .await + .expect("Should be able to read token_id's from balances table"); + + let token_id_registry = token_id_registry.into_iter().map(|token_id| token_id.0).collect(); + + Self { erc_cache: HashMap::new(), token_id_registry } + } + + pub fn empty() -> Self { + Self { erc_cache: HashMap::new(), token_id_registry: HashSet::new() } + } + + pub fn contains_token_id(&self, token_id: &str) -> bool { + self.token_id_registry.contains(token_id) + } + + pub fn register_token_id(&mut self, token_id: String) { + self.token_id_registry.insert(token_id); } }
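The pairing of LocalCache with the signed I256 delta type (defined in sql/utils.rs later in this diff) is the core of the balance-indexing design: transfer handlers only accumulate signed deltas in memory, and the executor later folds each delta into the stored unsigned balance. A minimal sketch of that accumulation step, using only types introduced in this diff:

```rust
use std::collections::HashMap;

use starknet::core::types::U256;
use torii_core::sql::utils::I256;
use torii_core::types::ContractType;

// Sketch only: mirrors how handle_erc20_transfer below adjusts erc_cache.
// `from_id`/`to_id` are balance ids ("<account>/<contract>/"), as built there.
fn apply_transfer(
    cache: &mut HashMap<(ContractType, String), I256>,
    from_id: String,
    to_id: String,
    amount: U256,
) {
    // A sender can go below its last persisted balance mid-batch, hence I256.
    *cache.entry((ContractType::ERC20, from_id)).or_default() -= I256::from(amount);
    *cache.entry((ContractType::ERC20, to_id)).or_default() += I256::from(amount);
}
```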
diff --git a/crates/torii/core/src/sql/erc.rs b/crates/torii/core/src/sql/erc.rs new file mode 100644 index 0000000000..36bf6216f6 --- /dev/null +++ b/crates/torii/core/src/sql/erc.rs @@ -0,0 +1,352 @@ +use std::collections::HashMap; +use std::mem; + +use anyhow::{Context, Result}; +use cainome::cairo_serde::{ByteArray, CairoSerde}; +use starknet::core::types::{BlockId, BlockTag, Felt, FunctionCall, U256}; +use starknet::core::utils::{get_selector_from_name, parse_cairo_short_string}; +use starknet::providers::Provider; +use tracing::debug; + +use super::utils::{u256_to_sql_string, I256}; +use super::{Sql, FELT_DELIMITER}; +use crate::executor::{ApplyBalanceDiffQuery, Argument, QueryMessage, QueryType}; +use crate::sql::utils::{felt_and_u256_to_sql_string, felt_to_sql_string, felts_to_sql_string}; +use crate::types::ContractType; +use crate::utils::utc_dt_string_from_timestamp; + +impl Sql { + #[allow(clippy::too_many_arguments)] + pub async fn handle_erc20_transfer<P: Provider + Sync>( + &mut self, + contract_address: Felt, + from_address: Felt, + to_address: Felt, + amount: U256, + provider: &P, + block_timestamp: u64, + ) -> Result<()> { + // contract_address + let token_id = felt_to_sql_string(&contract_address); + + let token_exists: bool = self.local_cache.contains_token_id(&token_id); + + if !token_exists { + self.register_erc20_token_metadata(contract_address, &token_id, provider).await?; + self.execute().await.with_context(|| "Failed to execute in handle_erc20_transfer")?; + } + + self.store_erc_transfer_event( + contract_address, + from_address, + to_address, + amount, + &token_id, + block_timestamp, + )?; + + if from_address != Felt::ZERO { + // from_address/contract_address/ + let from_balance_id = felts_to_sql_string(&[from_address, contract_address]); + let from_balance = self + .local_cache + .erc_cache + .entry((ContractType::ERC20, from_balance_id)) + .or_default(); + *from_balance -= I256::from(amount); + } + + if to_address != Felt::ZERO { + let to_balance_id = felts_to_sql_string(&[to_address, contract_address]); + let to_balance = + self.local_cache.erc_cache.entry((ContractType::ERC20, to_balance_id)).or_default(); + *to_balance += I256::from(amount); + } + + if self.local_cache.erc_cache.len() >= 100000 { + self.apply_cache_diff().await?; + } + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + pub async fn handle_erc721_transfer<P: Provider + Sync>( + &mut self, + contract_address: Felt, + from_address: Felt, + to_address: Felt, + token_id: U256, + provider: &P, + block_timestamp: u64, + ) -> Result<()> { + // contract_address:id + let token_id = felt_and_u256_to_sql_string(&contract_address, &token_id); + let token_exists: bool = self.local_cache.contains_token_id(&token_id); + + if !token_exists { + self.register_erc721_token_metadata(contract_address, &token_id, provider).await?; + self.execute().await?; + } + + self.store_erc_transfer_event( + contract_address, + from_address, + to_address, + U256::from(1u8), + &token_id, + block_timestamp, + )?; + + // from_address/contract_address:id + if from_address != Felt::ZERO { + let from_balance_id = + format!("{}{FELT_DELIMITER}{}", felt_to_sql_string(&from_address), &token_id); + let from_balance = self + .local_cache + .erc_cache + .entry((ContractType::ERC721, from_balance_id)) + .or_default(); + *from_balance -= I256::from(1u8); + } + + if to_address != Felt::ZERO { + let to_balance_id = + format!("{}{FELT_DELIMITER}{}", felt_to_sql_string(&to_address), &token_id); + let to_balance = self + .local_cache + .erc_cache + .entry((ContractType::ERC721, to_balance_id)) + .or_default(); + *to_balance += I256::from(1u8); + } + + if self.local_cache.erc_cache.len() >= 100000 { + self.apply_cache_diff().await?; + } + + Ok(()) + }
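For orientation, the composite row ids assembled above follow a small scheme derived from the helpers in sql/utils.rs later in this diff; note that felts_to_sql_string appends a trailing delimiter, so ERC20 balance ids end with a "/". A hypothetical helper spelling the ERC721 case out:

```rust
// Shape of the ids built above (illustrative sketch, not code from this diff).
// ERC20:  token id   = "0x<contract>"
//         balance id = "0x<account>/0x<contract>/"
// ERC721: token id   = "0x<contract>:0x<token_id as zero-padded hex>"
//         balance id = "0x<account>/0x<contract>:0x<token_id>"
fn erc721_balance_id(account: &str, erc721_token_row_id: &str) -> String {
    format!("{account}/{erc721_token_row_id}")
}
```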
+ + async fn register_erc20_token_metadata<P: Provider + Sync>( + &mut self, + contract_address: Felt, + token_id: &str, + provider: &P, + ) -> Result<()> { + // Fetch token information from the chain + let name = provider + .call( + FunctionCall { + contract_address, + entry_point_selector: get_selector_from_name("name").unwrap(), + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + + // len = 1 => return value felt (i.e. legacy erc20 token) + // len > 1 => return value ByteArray (i.e. new erc20 token) + let name = if name.len() == 1 { + parse_cairo_short_string(&name[0]).unwrap() + } else { + ByteArray::cairo_deserialize(&name, 0) + .expect("Return value not ByteArray") + .to_string() + .expect("Return value not String") + }; + + let symbol = provider + .call( + FunctionCall { + contract_address, + entry_point_selector: get_selector_from_name("symbol").unwrap(), + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + + let symbol = if symbol.len() == 1 { + parse_cairo_short_string(&symbol[0]).unwrap() + } else { + ByteArray::cairo_deserialize(&symbol, 0) + .expect("Return value not ByteArray") + .to_string() + .expect("Return value not String") + }; + + let decimals = provider + .call( + FunctionCall { + contract_address, + entry_point_selector: get_selector_from_name("decimals").unwrap(), + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + let decimals = u8::cairo_deserialize(&decimals, 0).expect("Return value not u8"); + + // Insert the token into the tokens table + self.executor.send(QueryMessage::other( + "INSERT INTO tokens (id, contract_address, name, symbol, decimals) VALUES (?, ?, ?, \ ?, ?)" + .to_string(), + vec![ + Argument::String(token_id.to_string()), + Argument::FieldElement(contract_address), + Argument::String(name), + Argument::String(symbol), + Argument::Int(decimals.into()), + ], + ))?; + + self.local_cache.register_token_id(token_id.to_string()); + + Ok(()) + } + + async fn register_erc721_token_metadata<P: Provider + Sync>( + &mut self, + contract_address: Felt, + token_id: &str, + provider: &P, + ) -> Result<()> { + let res = sqlx::query_as::<_, (String, String, u8)>( + "SELECT name, symbol, decimals FROM tokens WHERE contract_address = ?", + ) + .bind(felt_to_sql_string(&contract_address)) + .fetch_one(&self.pool) + .await; + + // If we find a token already registered for this contract_address we don't need to refetch + // the data, since it's the same for all ERC721 tokens + if let Ok((name, symbol, decimals)) = res { + debug!( + contract_address = %felt_to_sql_string(&contract_address), + "Token already registered for contract_address, so reusing fetched data", + ); + self.executor.send(QueryMessage::other( + "INSERT INTO tokens (id, contract_address, name, symbol, decimals) VALUES (?, ?, \ ?, ?, ?)" + .to_string(), + vec![ + Argument::String(token_id.to_string()), + Argument::FieldElement(contract_address), + Argument::String(name), + Argument::String(symbol), + Argument::Int(decimals.into()), + ], + ))?; + self.local_cache.register_token_id(token_id.to_string()); + return Ok(()); + } + + // Fetch token information from the chain + let name = provider + .call( + FunctionCall { + contract_address, + entry_point_selector: get_selector_from_name("name").unwrap(), + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + + // len = 1 => return value felt (i.e. legacy erc721 token) + // len > 1 => return value ByteArray (i.e.
new erc721 token) + let name = if name.len() == 1 { + parse_cairo_short_string(&name[0]).unwrap() + } else { + ByteArray::cairo_deserialize(&name, 0) + .expect("Return value not ByteArray") + .to_string() + .expect("Return value not String") + }; + + let symbol = provider + .call( + FunctionCall { + contract_address, + entry_point_selector: get_selector_from_name("symbol").unwrap(), + calldata: vec![], + }, + BlockId::Tag(BlockTag::Pending), + ) + .await?; + let symbol = if symbol.len() == 1 { + parse_cairo_short_string(&symbol[0]).unwrap() + } else { + ByteArray::cairo_deserialize(&symbol, 0) + .expect("Return value not ByteArray") + .to_string() + .expect("Return value not String") + }; + + let decimals = 0; + + // Insert the token into the tokens table + self.executor.send(QueryMessage::other( + "INSERT INTO tokens (id, contract_address, name, symbol, decimals) VALUES (?, ?, ?, \ + ?, ?)" + .to_string(), + vec![ + Argument::String(token_id.to_string()), + Argument::FieldElement(contract_address), + Argument::String(name), + Argument::String(symbol), + Argument::Int(decimals.into()), + ], + ))?; + + self.local_cache.register_token_id(token_id.to_string()); + + Ok(()) + } + + fn store_erc_transfer_event( + &mut self, + contract_address: Felt, + from: Felt, + to: Felt, + amount: U256, + token_id: &str, + block_timestamp: u64, + ) -> Result<()> { + let insert_query = "INSERT INTO erc_transfers (contract_address, from_address, \ + to_address, amount, token_id, executed_at) VALUES (?, ?, ?, ?, ?, ?)"; + + self.executor.send(QueryMessage::other( + insert_query.to_string(), + vec![ + Argument::FieldElement(contract_address), + Argument::FieldElement(from), + Argument::FieldElement(to), + Argument::String(u256_to_sql_string(&amount)), + Argument::String(token_id.to_string()), + Argument::String(utc_dt_string_from_timestamp(block_timestamp)), + ], + ))?; + + Ok(()) + } + + pub async fn apply_cache_diff(&mut self) -> Result<()> { + if !self.local_cache.erc_cache.is_empty() { + self.executor.send(QueryMessage::new( + "".to_string(), + vec![], + QueryType::ApplyBalanceDiff(ApplyBalanceDiffQuery { + erc_cache: mem::replace( + &mut self.local_cache.erc_cache, + HashMap::with_capacity(64), + ), + }), + ))?; + } + Ok(()) + } +} diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql/mod.rs similarity index 89% rename from crates/torii/core/src/sql.rs rename to crates/torii/core/src/sql/mod.rs index 257f906a05..ad00c34ca6 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql/mod.rs @@ -1,8 +1,9 @@ +use std::collections::HashMap; use std::convert::TryInto; use std::str::FromStr; use std::sync::Arc; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use dojo_types::primitive::Primitive; use dojo_types::schema::{EnumOption, Member, Struct, Ty}; use dojo_world::contracts::abi::model::Layout; @@ -13,9 +14,13 @@ use sqlx::{Pool, Sqlite}; use starknet::core::types::{Event, Felt, InvokeTransaction, Transaction}; use starknet_crypto::poseidon_hash_many; use tokio::sync::mpsc::UnboundedSender; +use utils::felts_to_sql_string; -use crate::cache::{Model, ModelCache}; -use crate::executor::{Argument, DeleteEntityQuery, QueryMessage, QueryType, SetHeadQuery}; +use crate::executor::{ + Argument, DeleteEntityQuery, QueryMessage, QueryType, ResetCursorsQuery, SetHeadQuery, + UpdateCursorsQuery, +}; +use crate::types::ContractType; use crate::utils::utc_dt_string_from_timestamp; type IsEventMessage = bool; @@ -24,57 +29,76 @@ type IsStoreUpdate = bool; pub const 
WORLD_CONTRACT_TYPE: &str = "WORLD"; pub const FELT_DELIMITER: &str = "/"; +pub mod cache; +pub mod erc; +pub mod query_queue; #[cfg(test)] -#[path = "sql_test.rs"] +#[path = "test.rs"] mod test; +pub mod utils; + +use cache::{LocalCache, Model, ModelCache}; #[derive(Debug, Clone)] pub struct Sql { - world_address: Felt, pub pool: Pool<Sqlite>, pub executor: UnboundedSender<QueryMessage>, model_cache: Arc<ModelCache>, + // when the SQL struct is cloned, an empty local_cache is created + local_cache: LocalCache, +} + +#[derive(Debug, Clone)] +pub struct Cursors { + pub cursor_map: HashMap<Felt, Felt>, + pub last_pending_block_tx: Option<Felt>, + pub head: Option<u64>, } impl Sql { pub async fn new( pool: Pool<Sqlite>, - world_address: Felt, executor: UnboundedSender<QueryMessage>, + contracts: &HashMap<Felt, ContractType>, ) -> Result<Self> { - executor.send(QueryMessage::other( - "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ ?)" - .to_string(), - vec![ - Argument::FieldElement(world_address), - Argument::FieldElement(world_address), - Argument::String(WORLD_CONTRACT_TYPE.to_string()), - ], - ))?; + for contract in contracts { + executor.send(QueryMessage::other( + "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, \ ?, ?)" + .to_string(), + vec![ + Argument::FieldElement(*contract.0), + Argument::FieldElement(*contract.0), + Argument::String(contract.1.to_string()), + ], + ))?; + } + let local_cache = LocalCache::new(pool.clone()).await; let db = Self { pool: pool.clone(), - world_address, executor, - model_cache: Arc::new(ModelCache::new(pool)), + model_cache: Arc::new(ModelCache::new(pool.clone())), + local_cache, }; + db.execute().await?; Ok(db) } - pub async fn head(&self) -> Result<(u64, Option<Felt>, Option<Felt>)> { + pub async fn head(&self, contract: Felt) -> Result<(u64, Option<Felt>, Option<Felt>)> { let indexer_query = sqlx::query_as::<_, (Option<i64>, Option<String>, Option<String>, String)>( - "SELECT head, last_pending_block_world_tx, last_pending_block_tx, contract_type \ FROM contracts WHERE id = ?", + "SELECT head, last_pending_block_contract_tx, last_pending_block_tx, \ contract_type FROM contracts WHERE id = ?", ) - .bind(format!("{:#x}", self.world_address)); - - let indexer: (Option<i64>, Option<String>, Option<String>, String) = - indexer_query.fetch_one(&self.pool).await?; + .bind(format!("{:#x}", contract)); + let indexer: (Option<i64>, Option<String>, Option<String>, String) = indexer_query + .fetch_one(&self.pool) + .await + .with_context(|| format!("Failed to fetch head for contract: {:#x}", contract))?; Ok(( indexer .0 @@ -100,7 +124,7 @@ impl Sql { Argument::Int(last_block_timestamp.try_into().map_err(|_| { anyhow!("Last block timestamp value {} doesn't fit in i64", last_block_timestamp) })?); - let id = Argument::FieldElement(self.world_address); + let id = Argument::FieldElement(contract_address); self.executor.send(QueryMessage::new( "UPDATE contracts SET head = ?, last_block_timestamp = ? WHERE id = ?".to_string(), @@ -116,21 +140,22 @@ Ok(()) } - pub fn set_last_pending_block_world_tx( + pub fn set_last_pending_block_contract_tx( &mut self, - last_pending_block_world_tx: Option<Felt>, + contract: Felt, + last_pending_block_contract_tx: Option<Felt>, ) -> Result<()> { - let last_pending_block_world_tx = if let Some(f) = last_pending_block_world_tx { + let last_pending_block_contract_tx = if let Some(f) = last_pending_block_contract_tx { Argument::String(format!("{:#x}", f)) } else { Argument::Null }; - let id = Argument::FieldElement(self.world_address); + let id = Argument::FieldElement(contract); self.executor.send(QueryMessage::other( - "UPDATE contracts SET last_pending_block_world_tx = ? WHERE id = ?".to_string(), - vec![last_pending_block_world_tx, id], + "UPDATE contracts SET last_pending_block_contract_tx = ? WHERE id = ?".to_string(), + vec![last_pending_block_contract_tx, id], ))?; Ok(()) @@ -142,16 +167,89 @@ } else { Argument::Null }; - let id = Argument::FieldElement(self.world_address); self.executor.send(QueryMessage::other( - "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?".to_string(), - vec![last_pending_block_tx, id], + "UPDATE contracts SET last_pending_block_tx = ? WHERE 1=1".to_string(), + vec![last_pending_block_tx], ))?; + + Ok(()) + } + + pub(crate) async fn cursors(&self) -> Result<Cursors> { + let mut conn: PoolConnection<Sqlite> = self.pool.acquire().await?; + let cursors = sqlx::query_as::<_, (String, String)>( + "SELECT contract_address, last_pending_block_contract_tx FROM contracts WHERE \ last_pending_block_contract_tx IS NOT NULL", + ) + .fetch_all(&mut *conn) + .await?; + + let (head, last_pending_block_tx) = sqlx::query_as::<_, (Option<i64>, Option<String>)>( + "SELECT head, last_pending_block_tx FROM contracts WHERE 1=1", + ) + .fetch_one(&mut *conn) + .await?; + + let head = head.map(|h| h.try_into().expect("doesn't fit in u64")); + let last_pending_block_tx = + last_pending_block_tx.map(|t| Felt::from_str(&t).expect("its a valid felt")); + Ok(Cursors { + cursor_map: cursors + .into_iter() + .map(|(c, t)| { + ( + Felt::from_str(&c).expect("its a valid felt"), + Felt::from_str(&t).expect("its a valid felt"), + ) + }) + .collect(), + last_pending_block_tx, + head, + }) + } + + // For a given contract address, sets head to the passed value and sets + // last_pending_block_contract_tx and last_pending_block_tx to null + pub fn reset_cursors( + &mut self, + head: u64, + cursor_map: HashMap<Felt, Felt>, + last_block_timestamp: u64, + ) -> Result<()> { + self.executor.send(QueryMessage::new( + "".to_string(), + vec![], + QueryType::ResetCursors(ResetCursorsQuery { + cursor_map, + last_block_timestamp, + last_block_number: head, + }), ))?; Ok(()) } + pub fn update_cursors( + &mut self, + head: u64, + last_pending_block_tx: Option<Felt>, + cursor_map: HashMap<Felt, Felt>, + pending_block_timestamp: u64, + ) -> Result<()> { + self.executor.send(QueryMessage::new( + "".to_string(), + vec![], + QueryType::UpdateCursors(UpdateCursorsQuery { + cursor_map, + last_pending_block_tx, + last_block_number: head, + pending_block_timestamp, + }), + ))?; + Ok(()) + } + #[allow(clippy::too_many_arguments)] pub async fn register_model( &mut self, @@ -311,7 +409,7 @@ let entity_id = format!("{:#x}", poseidon_hash_many(&keys)); let model_id = format!("{:#x}", compute_selector_from_names(model_namespace, model_name)); - let keys_str = felts_sql_string(&keys); + let keys_str = felts_to_sql_string(&keys); let insert_entities = "INSERT INTO event_messages (id, keys, event_id, executed_at)
\ VALUES (?, ?, ?, ?) ON CONFLICT(id) DO UPDATE SET \ updated_at=CURRENT_TIMESTAMP, executed_at=EXCLUDED.executed_at, \ @@ -459,15 +557,15 @@ impl Sql { Transaction::Invoke(InvokeTransaction::V1(invoke_v1_transaction)) => ( Argument::FieldElement(invoke_v1_transaction.transaction_hash), Argument::FieldElement(invoke_v1_transaction.sender_address), - Argument::String(felts_sql_string(&invoke_v1_transaction.calldata)), + Argument::String(felts_to_sql_string(&invoke_v1_transaction.calldata)), Argument::FieldElement(invoke_v1_transaction.max_fee), - Argument::String(felts_sql_string(&invoke_v1_transaction.signature)), + Argument::String(felts_to_sql_string(&invoke_v1_transaction.signature)), Argument::FieldElement(invoke_v1_transaction.nonce), ), Transaction::L1Handler(l1_handler_transaction) => ( Argument::FieldElement(l1_handler_transaction.transaction_hash), Argument::FieldElement(l1_handler_transaction.contract_address), - Argument::String(felts_sql_string(&l1_handler_transaction.calldata)), + Argument::String(felts_to_sql_string(&l1_handler_transaction.calldata)), Argument::FieldElement(Felt::ZERO), // has no max_fee Argument::String("".to_string()), // has no signature Argument::FieldElement((l1_handler_transaction.nonce).into()), @@ -504,8 +602,8 @@ impl Sql { block_timestamp: u64, ) -> Result<()> { let id = Argument::String(event_id.to_string()); - let keys = Argument::String(felts_sql_string(&event.keys)); - let data = Argument::String(felts_sql_string(&event.data)); + let keys = Argument::String(felts_to_sql_string(&event.keys)); + let data = Argument::String(felts_to_sql_string(&event.data)); let hash = Argument::FieldElement(transaction_hash); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); @@ -1160,8 +1258,3 @@ impl Sql { recv.await? 
} } - -pub fn felts_sql_string(felts: &[Felt]) -> String { - felts.iter().map(|k| format!("{:#x}", k)).collect::<Vec<String>>().join(FELT_DELIMITER) - + FELT_DELIMITER -} diff --git a/crates/torii/core/src/sql/query_queue.rs b/crates/torii/core/src/sql/query_queue.rs new file mode 100644 index 0000000000..774f8fb6dd --- /dev/null +++ b/crates/torii/core/src/sql/query_queue.rs @@ -0,0 +1,188 @@ +use std::collections::VecDeque; + +use anyhow::{Context, Result}; +use dojo_types::schema::{Struct, Ty}; +use sqlx::{FromRow, Pool, Sqlite}; +use starknet::core::types::Felt; + +use super::utils::felt_to_sql_string; +use crate::simple_broker::SimpleBroker; +use crate::types::{ Entity as EntityUpdated, Event as EventEmitted, EventMessage as EventMessageUpdated, Model as ModelRegistered, }; + +#[derive(Debug, Clone)] +pub enum Argument { + Null, + Int(i64), + Bool(bool), + String(String), + FieldElement(Felt), +} + +#[derive(Debug, Clone)] +pub enum BrokerMessage { + ModelRegistered(ModelRegistered), + EntityUpdated(EntityUpdated), + EventMessageUpdated(EventMessageUpdated), + EventEmitted(EventEmitted), +} + +#[derive(Debug, Clone)] +pub struct QueryQueue { + pool: Pool<Sqlite>, + pub queue: VecDeque<(String, Vec<Argument>, QueryType)>, +} + +#[derive(Debug, Clone)] +pub struct DeleteEntityQuery { + pub entity_id: String, + pub event_id: String, + pub block_timestamp: String, + pub ty: Ty, +} + +#[derive(Debug, Clone)] +pub enum QueryType { + SetEntity(Ty), + DeleteEntity(DeleteEntityQuery), + EventMessage(Ty), + RegisterModel, + StoreEvent, + Other, +} + +impl QueryQueue { + pub fn new(pool: Pool<Sqlite>) -> Self { + QueryQueue { pool, queue: VecDeque::new() } + } + + pub fn enqueue<S: Into<String>>( + &mut self, + statement: S, + arguments: Vec<Argument>, + query_type: QueryType, + ) { + self.queue.push_back((statement.into(), arguments, query_type)); + } + + pub async fn execute_all(&mut self) -> Result<()> { + let mut tx = self.pool.begin().await?; + // publishes that are related to queries in the queue, they should be sent + // after the queries are executed + let mut publish_queue = VecDeque::new(); + + while let Some((statement, arguments, query_type)) = self.queue.pop_front() { + let mut query = sqlx::query(&statement); + + for arg in &arguments { + query = match arg { + Argument::Null => query.bind(None::<String>), + Argument::Int(integer) => query.bind(integer), + Argument::Bool(bool) => query.bind(bool), + Argument::String(string) => query.bind(string), + Argument::FieldElement(felt) => query.bind(felt_to_sql_string(felt)), + } + } + + match query_type { + QueryType::SetEntity(entity) => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let mut entity_updated = EntityUpdated::from_row(&row)?; + entity_updated.updated_model = Some(entity); + entity_updated.deleted = false; + let broker_message = BrokerMessage::EntityUpdated(entity_updated); + publish_queue.push_back(broker_message); + } + QueryType::DeleteEntity(entity) => { + let delete_model = query.execute(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + if delete_model.rows_affected() == 0 { + continue; + } + + let row = sqlx::query( + "UPDATE entities SET updated_at=CURRENT_TIMESTAMP, executed_at=?, \ event_id=? WHERE id = ?
RETURNING *", + ) + .bind(entity.block_timestamp) + .bind(entity.event_id) + .bind(entity.entity_id) + .fetch_one(&mut *tx) + .await?; + let mut entity_updated = EntityUpdated::from_row(&row)?; + entity_updated.updated_model = + Some(Ty::Struct(Struct { name: entity.ty.name(), children: vec![] })); + + let count = sqlx::query_scalar::<_, i64>( + "SELECT count(*) FROM entity_model WHERE entity_id = ?", + ) + .bind(entity_updated.id.clone()) + .fetch_one(&mut *tx) + .await?; + + // Delete entity if all of its models are deleted + if count == 0 { + sqlx::query("DELETE FROM entities WHERE id = ?") + .bind(entity_updated.id.clone()) + .execute(&mut *tx) + .await?; + entity_updated.deleted = true; + } + + let broker_message = BrokerMessage::EntityUpdated(entity_updated); + publish_queue.push_back(broker_message); + } + QueryType::RegisterModel => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let model_registered = ModelRegistered::from_row(&row)?; + publish_queue.push_back(BrokerMessage::ModelRegistered(model_registered)); + } + QueryType::EventMessage(entity) => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let mut event_message = EventMessageUpdated::from_row(&row)?; + event_message.updated_model = Some(entity); + let broker_message = BrokerMessage::EventMessageUpdated(event_message); + publish_queue.push_back(broker_message); + } + QueryType::StoreEvent => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let event = EventEmitted::from_row(&row)?; + publish_queue.push_back(BrokerMessage::EventEmitted(event)); + } + QueryType::Other => { + query.execute(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + } + } + } + + tx.commit().await?; + + while let Some(message) = publish_queue.pop_front() { + send_broker_message(message); + } + + Ok(()) + } +} + +fn send_broker_message(message: BrokerMessage) { + match message { + BrokerMessage::ModelRegistered(model) => SimpleBroker::publish(model), + BrokerMessage::EntityUpdated(entity) => SimpleBroker::publish(entity), + BrokerMessage::EventMessageUpdated(event) => SimpleBroker::publish(event), + BrokerMessage::EventEmitted(event) => SimpleBroker::publish(event), + } +} diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql/test.rs similarity index 92% rename from crates/torii/core/src/sql_test.rs rename to crates/torii/core/src/sql/test.rs index 499fd0adf8..bd6fe9208a 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql/test.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; @@ -22,13 +23,8 @@ use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; use crate::executor::Executor; -use crate::processors::generate_event_processors_map; -use crate::processors::register_model::RegisterModelProcessor; -use crate::processors::store_del_record::StoreDelRecordProcessor; -use crate::processors::store_set_record::StoreSetRecordProcessor; -use crate::processors::store_update_member::StoreUpdateMemberProcessor; -use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; +use crate::types::ContractType; pub async fn bootstrap_engine
<P>
( world: WorldContractReader
<P>
, @@ -40,26 +36,19 @@ where { let (shutdown_tx, _) = broadcast::channel(1); + let to = provider.block_hash_and_number().await?.block_number; + let world_address = world.address; let mut engine = Engine::new( world, db.clone(), provider, - Processors { - event: generate_event_processors_map(vec![ - Arc::new(RegisterModelProcessor), - Arc::new(StoreSetRecordProcessor), - Arc::new(StoreUpdateRecordProcessor), - Arc::new(StoreUpdateMemberProcessor), - Arc::new(StoreDelRecordProcessor), - ])?, - ..Processors::default() - }, + Processors { ..Processors::default() }, EngineConfig::default(), shutdown_tx, None, + Arc::new(HashMap::from([(world_address, ContractType::WORLD)])), ); - let data = engine.fetch_range(0, to, None).await.unwrap(); + let data = engine.fetch_range(0, to, &HashMap::new()).await.unwrap(); engine.process_range(data).await.unwrap(); db.execute().await.unwrap(); @@ -147,7 +136,13 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new( + pool.clone(), + sender.clone(), + &HashMap::from([(world_reader.address, ContractType::WORLD)]), + ) + .await + .unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); @@ -310,7 +305,13 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new( + pool.clone(), + sender.clone(), + &HashMap::from([(world_reader.address, ContractType::WORLD)]), + ) + .await + .unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await; @@ -402,7 +403,13 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new( + pool.clone(), + sender.clone(), + &HashMap::from([(world_reader.address, ContractType::WORLD)]), + ) + .await + .unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); }
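The tests above hard-code a single WORLD entry; in the binary the same map is presumably derived from the torii config. A minimal sketch using the ToriiConfig and Contract types added to torii_core::types later in this diff:

```rust
use std::collections::HashMap;

use starknet::core::types::Felt;
use torii_core::types::{ContractType, ToriiConfig};

// Sketch: flatten the configured contracts into the map Sql::new expects.
fn contracts_map(config: &ToriiConfig) -> HashMap<Felt, ContractType> {
    config.contracts.iter().map(|c| (c.address, c.r#type)).collect()
}
```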
diff --git a/crates/torii/core/src/sql/utils.rs b/crates/torii/core/src/sql/utils.rs new file mode 100644 index 0000000000..66b345dd1a --- /dev/null +++ b/crates/torii/core/src/sql/utils.rs @@ -0,0 +1,333 @@ +use std::cmp::Ordering; +use std::ops::{Add, AddAssign, Sub, SubAssign}; + +use starknet::core::types::U256; +use starknet_crypto::Felt; + +use super::FELT_DELIMITER; + +pub fn felts_to_sql_string(felts: &[Felt]) -> String { + felts.iter().map(|k| format!("{:#x}", k)).collect::<Vec<String>>().join(FELT_DELIMITER) + + FELT_DELIMITER +} + +pub fn felt_to_sql_string(felt: &Felt) -> String { + format!("{:#x}", felt) +} + +pub fn felt_and_u256_to_sql_string(felt: &Felt, u256: &U256) -> String { + format!("{}:{}", felt_to_sql_string(felt), u256_to_sql_string(u256)) +} + +pub fn u256_to_sql_string(u256: &U256) -> String { + format!("{:#064x}", u256) +} + +pub fn sql_string_to_u256(sql_string: &str) -> U256 { + let sql_string = sql_string.strip_prefix("0x").unwrap_or(sql_string); + U256::from(crypto_bigint::U256::from_be_hex(sql_string)) +} + +// type used to do calculations on in-memory balances +#[derive(Debug, Clone, Copy)] +pub struct I256 { + pub value: U256, + pub is_negative: bool, +} + +impl Default for I256 { + fn default() -> Self { + Self { value: U256::from(0u8), is_negative: false } + } +} + +impl From<U256> for I256 { + fn from(value: U256) -> Self { + Self { value, is_negative: false } + } +} + +impl From<u8> for I256 { + fn from(value: u8) -> Self { + Self { value: U256::from(value), is_negative: false } + } +} + +impl Add for I256 { + type Output = I256; + + fn add(self, other: I256) -> I256 { + // Special case: if both are negative zero, return positive zero + if self.value == U256::from(0u8) + && other.value == U256::from(0u8) + && self.is_negative + && other.is_negative + { + return I256 { value: U256::from(0u8), is_negative: false }; + } + + if self.is_negative == other.is_negative { + // Same sign: add the values and keep the sign + I256 { value: self.value + other.value, is_negative: self.is_negative } + } else { + // Different signs: subtract the smaller value from the larger one + match self.value.cmp(&other.value) { + Ordering::Greater => { + I256 { value: self.value - other.value, is_negative: self.is_negative } + } + Ordering::Less => { + I256 { value: other.value - self.value, is_negative: other.is_negative } + } + // If both values are equal, the result is zero and not negative + Ordering::Equal => I256 { value: U256::from(0u8), is_negative: false }, + } + } + } +} + +impl Sub for I256 { + type Output = I256; + + fn sub(self, other: I256) -> I256 { + let new_sign = if other.value == U256::from(0u8) { false } else { !other.is_negative }; + let negated_other = I256 { value: other.value, is_negative: new_sign }; + self.add(negated_other) + } +} + +impl AddAssign for I256 { + fn add_assign(&mut self, other: I256) { + *self = *self + other; + } +} + +impl SubAssign for I256 { + fn sub_assign(&mut self, other: I256) { + *self = *self - other; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_add_zero_false_and_zero_false() { + // 0,false + 0,false == 0,false + let a = I256::default(); + let b = I256::default(); + let result = a + b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_zero_true_and_zero_false() { + // 0,true + 0,false == 0,false + let a = I256 { value: U256::from(0u8), is_negative: true }; + let b = I256::default(); + let result = a + b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_zero_false_and_zero_false() { + // 0,false - 0,false == 0,false + let a = I256::default(); + let b = I256::default(); + let result = a - b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_zero_true_and_zero_false() { + // 0,true - 0,false == 0,false + let a = I256 { value: U256::from(0u8), is_negative: true }; + let b = I256::default(); + let result = a - b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_positive_and_negative_equal_values() { + // 5,false + 5,true == 0,false + let a = I256::from(U256::from(5u8)); + let b = I256 { value: U256::from(5u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_positive_and_negative() { + // 10,false - 5,true == 15,false + let a = I256::from(U256::from(10u8)); + let b = I256 { value: U256::from(5u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(15u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_larger_from_smaller() { + // 5,false - 10,true == 15,false + let a = I256::from(U256::from(5u8)); + let b = I256 { value: U256::from(10u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value,
U256::from(15u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_mixed_signs() { + // 15,false + 10,true == 5,false + let a = I256::from(U256::from(15u8)); + let b = I256 { value: U256::from(10u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(5u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_mixed_signs() { + // 5,false - 10,true == 15,false + let a = I256::from(U256::from(5u8)); + let b = I256 { value: U256::from(10u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(15u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_negative_and_negative() { + // -5,true + -3,true == -8,true + let a = I256 { value: U256::from(5u8), is_negative: true }; + let b = I256 { value: U256::from(3u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(8u8)); + assert!(result.is_negative); + } + + #[test] + fn test_sub_negative_and_negative() { + // -5,true - -3,true == -2,true + let a = I256 { value: U256::from(5u8), is_negative: true }; + let b = I256 { value: U256::from(3u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(2u8)); + assert!(result.is_negative); + } + + #[test] + fn test_subtraction_resulting_zero() { + // 5,false - 5,false == 0,false + let a = I256::from(U256::from(5u8)); + let b = I256::from(U256::from(5u8)); + let result = a - b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_subtraction_resulting_zero_negative() { + // 5,true - 5,true == 0,false + let a = I256 { value: U256::from(5u8), is_negative: true }; + let b = I256 { value: U256::from(5u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_negative_and_positive_result_positive() { + // -10,true + 15,false == 5,false + let a = I256 { value: U256::from(10u8), is_negative: true }; + let b = I256::from(U256::from(15u8)); + let result = a + b; + assert_eq!(result.value, U256::from(5u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_negative_and_positive_result_negative() { + // -15,true + 5,false == -10,true + let a = I256 { value: U256::from(15u8), is_negative: true }; + let b = I256::from(U256::from(5u8)); + let result = a + b; + assert_eq!(result.value, U256::from(10u8)); + assert!(result.is_negative); + } + + #[test] + fn test_add_zero_true_and_fifteen_true() { + // 0,true + 15,true == 15,true + let a = I256 { value: U256::from(0u8), is_negative: true }; + let b = I256 { value: U256::from(15u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(15u8)); + assert!(result.is_negative); + } + + #[test] + fn test_sub_zero_true_and_fifteen_true() { + // 0,true - 15,true == 15,false + let a = I256 { value: U256::from(0u8), is_negative: true }; + let b = I256 { value: U256::from(15u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(15u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_fifteen_true_and_zero_true() { + // 15,true + 0,true == 15,true + let a = I256 { value: U256::from(15u8), is_negative: true }; + let b = I256 { value: U256::from(0u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(15u8)); + assert!(result.is_negative); + } + + #[test] + fn test_sub_fifteen_true_and_zero_true() { + // 15,true - 0,true == 15,true + let a = I256 { value: U256::from(15u8), is_negative: 
true }; + let b = I256 { value: U256::from(0u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(15u8)); + assert!(result.is_negative); + } + + #[test] + fn test_negative_zero() { + // 0,true + 0,true == 0,false + let a = I256 { value: U256::from(0u8), is_negative: true }; + let b = I256 { value: U256::from(0u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(0u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_sub_positive_and_negative_zero() { + // 15,false - 0,true == 15,false + let a = I256::from(U256::from(15u8)); + let b = I256 { value: U256::from(0u8), is_negative: true }; + let result = a - b; + assert_eq!(result.value, U256::from(15u8)); + assert!(!result.is_negative); + } + + #[test] + fn test_add_positive_and_negative_zero() { + // 15,false + 0,true == 15,false + let a = I256::from(U256::from(15u8)); + let b = I256 { value: U256::from(0u8), is_negative: true }; + let result = a + b; + assert_eq!(result.value, U256::from(15u8)); + assert!(!result.is_negative); + } +} diff --git a/crates/torii/core/src/types.rs b/crates/torii/core/src/types.rs index f24607a91a..254a095517 100644 --- a/crates/torii/core/src/types.rs +++ b/crates/torii/core/src/types.rs @@ -1,4 +1,7 @@ use core::fmt; +use std::collections::VecDeque; +use std::path::PathBuf; +use std::str::FromStr; use chrono::{DateTime, Utc}; use dojo_types::schema::Ty; @@ -117,11 +120,63 @@ pub struct Event { pub created_at: DateTime<Utc>, } +#[derive(Default, Deserialize, Debug, Clone)] +pub struct ToriiConfig { + /// contract addresses to index + pub contracts: VecDeque<Contract>, +} + +impl ToriiConfig { + pub fn load_from_path(path: &PathBuf) -> Result<Self, anyhow::Error> { + let config = std::fs::read_to_string(path)?; + let config: Self = toml::from_str(&config)?; + Ok(config) + } +} + +#[derive(Deserialize, Debug, Clone, Copy)] +pub struct Contract { + pub address: Felt, + pub r#type: ContractType, +} + +#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum ContractType { + WORLD, + ERC20, + ERC721, +} + +impl FromStr for ContractType { + type Err = anyhow::Error; + + fn from_str(input: &str) -> Result<Self, Self::Err> { + match input.to_lowercase().as_str() { + "world" => Ok(ContractType::WORLD), + "erc20" => Ok(ContractType::ERC20), + "erc721" => Ok(ContractType::ERC721), + _ => Err(anyhow::anyhow!("Invalid contract type: {}", input)), + } + } +} + +impl std::fmt::Display for ContractType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ContractType::WORLD => write!(f, "WORLD"), + ContractType::ERC20 => write!(f, "ERC20"), + ContractType::ERC721 => write!(f, "ERC721"), + } + } +} + #[derive(FromRow, Deserialize, Debug, Clone, Default)] #[serde(rename_all = "camelCase")] -pub struct Contract { +pub struct ContractCursor { pub head: i64, pub tps: i64, pub last_block_timestamp: i64, pub contract_address: String, + pub last_pending_block_tx: Option<String>, + pub last_pending_block_contract_tx: Option<String>, } diff --git a/crates/torii/graphql/Cargo.toml b/crates/torii/graphql/Cargo.toml index 651a076a80..b4c2d3ced6 100644 --- a/crates/torii/graphql/Cargo.toml +++ b/crates/torii/graphql/Cargo.toml @@ -25,12 +25,13 @@ scarb-ui.workspace = true serde.workspace = true serde_json.workspace = true sqlx.workspace = true +starknet-crypto.workspace = true strum.workspace = true strum_macros.workspace = true thiserror.workspace = true -tokio.workspace = true tokio-stream = "0.1.11" tokio-util = "0.7.7" +tokio.workspace = true toml.workspace = true
torii-core.workspace = true tracing.workspace = true @@ -46,6 +47,6 @@ katana-runner.workspace = true scarb.workspace = true serial_test = "2.0.0" sozo-ops.workspace = true +starknet-crypto.workspace = true starknet.workspace = true tempfile.workspace = true -starknet-crypto.workspace = true diff --git a/crates/torii/graphql/src/constants.rs b/crates/torii/graphql/src/constants.rs index e09c8de6d2..2d851f07b1 100644 --- a/crates/torii/graphql/src/constants.rs +++ b/crates/torii/graphql/src/constants.rs @@ -33,6 +33,9 @@ pub const QUERY_TYPE_NAME: &str = "World__Query"; pub const SUBSCRIPTION_TYPE_NAME: &str = "World__Subscription"; pub const MODEL_ORDER_TYPE_NAME: &str = "World__ModelOrder"; pub const MODEL_ORDER_FIELD_TYPE_NAME: &str = "World__ModelOrderField"; +pub const ERC_BALANCE_TYPE_NAME: &str = "ERC__Balance"; +pub const ERC_TRANSFER_TYPE_NAME: &str = "ERC__Transfer"; +pub const ERC_TOKEN_TYPE_NAME: &str = "ERC__Token"; // objects' single and plural names pub const ENTITY_NAMES: (&str, &str) = ("entity", "entities"); @@ -45,6 +48,10 @@ pub const METADATA_NAMES: (&str, &str) = ("metadata", "metadatas"); pub const TRANSACTION_NAMES: (&str, &str) = ("transaction", "transactions"); pub const PAGE_INFO_NAMES: (&str, &str) = ("pageInfo", ""); +pub const ERC_BALANCE_NAME: (&str, &str) = ("ercBalance", ""); +pub const ERC_TOKEN_NAME: (&str, &str) = ("ercToken", ""); +pub const ERC_TRANSFER_NAME: (&str, &str) = ("ercTransfer", ""); + // misc pub const ORDER_DIR_TYPE_NAME: &str = "OrderDirection"; pub const ORDER_ASC: &str = "ASC"; diff --git a/crates/torii/graphql/src/error.rs b/crates/torii/graphql/src/error.rs index d00969f98b..83834c9f35 100644 --- a/crates/torii/graphql/src/error.rs +++ b/crates/torii/graphql/src/error.rs @@ -9,6 +9,8 @@ pub enum ExtractError { NotList(String), #[error("Not a string: {0}")] NotString(String), + #[error("Not a felt: {0}")] + NotFelt(String), #[error("Not a number: {0}")] NotNumber(String), } diff --git a/crates/torii/graphql/src/mapping.rs b/crates/torii/graphql/src/mapping.rs index 1086373bca..47f7d8e1b1 100644 --- a/crates/torii/graphql/src/mapping.rs +++ b/crates/torii/graphql/src/mapping.rs @@ -4,7 +4,7 @@ use async_graphql::Name; use dojo_types::primitive::Primitive; use lazy_static::lazy_static; -use crate::constants::{CONTENT_TYPE_NAME, SOCIAL_TYPE_NAME}; +use crate::constants::{CONTENT_TYPE_NAME, ERC_TOKEN_TYPE_NAME, SOCIAL_TYPE_NAME}; use crate::types::{GraphqlType, TypeData, TypeMapping}; lazy_static! { @@ -144,4 +144,27 @@ lazy_static! 
{ TypeData::Simple(TypeRef::named(GraphqlType::DateTime.to_string())) ), ]); + + pub static ref ERC_BALANCE_TYPE_MAPPING: TypeMapping = IndexMap::from([ + (Name::new("balance"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("type"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("token_metadata"), TypeData::Simple(TypeRef::named(ERC_TOKEN_TYPE_NAME))), + ]); + + pub static ref ERC_TRANSFER_TYPE_MAPPING: TypeMapping = IndexMap::from([ + (Name::new("from"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("to"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("amount"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("type"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("executed_at"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("token_metadata"), TypeData::Simple(TypeRef::named(ERC_TOKEN_TYPE_NAME))), + ]); + + pub static ref ERC_TOKEN_TYPE_MAPPING: TypeMapping = IndexMap::from([ + (Name::new("name"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("symbol"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("token_id"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("decimals"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + (Name::new("contract_address"), TypeData::Simple(TypeRef::named(TypeRef::STRING))), + ]); } diff --git a/crates/torii/graphql/src/object/erc/erc_balance.rs b/crates/torii/graphql/src/object/erc/erc_balance.rs new file mode 100644 index 0000000000..2e87a281dd --- /dev/null +++ b/crates/torii/graphql/src/object/erc/erc_balance.rs @@ -0,0 +1,143 @@ +use async_graphql::dynamic::{Field, FieldFuture, InputValue, TypeRef}; +use async_graphql::{Name, Value}; +use convert_case::{Case, Casing}; +use serde::Deserialize; +use sqlx::{FromRow, Pool, Sqlite, SqliteConnection}; +use starknet_crypto::Felt; +use torii_core::sql::utils::felt_to_sql_string; +use tracing::warn; + +use crate::constants::{ERC_BALANCE_NAME, ERC_BALANCE_TYPE_NAME}; +use crate::mapping::ERC_BALANCE_TYPE_MAPPING; +use crate::object::{BasicObject, ResolvableObject}; +use crate::types::{TypeMapping, ValueMapping}; +use crate::utils::extract; + +#[derive(Debug)] +pub struct ErcBalanceObject; + +impl BasicObject for ErcBalanceObject { + fn name(&self) -> (&str, &str) { + ERC_BALANCE_NAME + } + + fn type_name(&self) -> &str { + ERC_BALANCE_TYPE_NAME + } + + fn type_mapping(&self) -> &TypeMapping { + &ERC_BALANCE_TYPE_MAPPING + } +} + +impl ResolvableObject for ErcBalanceObject { + fn resolvers(&self) -> Vec<Field> { + let account_address = "account_address"; + let argument = InputValue::new( + account_address.to_case(Case::Camel), + TypeRef::named_nn(TypeRef::STRING), + ); + + let field = Field::new(self.name().0, TypeRef::named_list(self.type_name()), move |ctx| { + FieldFuture::new(async move { + let mut conn = ctx.data::<Pool<Sqlite>>()?.acquire().await?; + let address = extract::<Felt>( + ctx.args.as_index_map(), + &account_address.to_case(Case::Camel), + )?; + + let erc_balances = fetch_erc_balances(&mut conn, address).await?; + + Ok(Some(Value::List(erc_balances))) + }) + }) + .argument(argument); + vec![field] + } +} + +async fn fetch_erc_balances( + conn: &mut SqliteConnection, + address: Felt, +) -> sqlx::Result<Vec<Value>> { + let query = "SELECT t.contract_address, t.name, t.symbol, t.decimals, b.balance, b.token_id, \ c.contract_type FROM balances b JOIN tokens t ON b.token_id = t.id JOIN contracts c ON t.contract_address = c.contract_address WHERE
b.account_address = ?"; + + let rows = sqlx::query(query).bind(felt_to_sql_string(&address)).fetch_all(conn).await?; + + let mut erc_balances = Vec::new(); + + for row in rows { + let row = BalanceQueryResultRaw::from_row(&row)?; + + let balance_value = match row.contract_type.to_lowercase().as_str() { + "erc20" => { + let token_metadata = Value::Object(ValueMapping::from([ + (Name::new("name"), Value::String(row.name)), + (Name::new("symbol"), Value::String(row.symbol)), + // for erc20 there is no token_id + (Name::new("token_id"), Value::Null), + (Name::new("decimals"), Value::String(row.decimals.to_string())), + (Name::new("contract_address"), Value::String(row.contract_address.clone())), + ])); + + Value::Object(ValueMapping::from([ + (Name::new("balance"), Value::String(row.balance)), + (Name::new("type"), Value::String(row.contract_type)), + (Name::new("token_metadata"), token_metadata), + ])) + } + "erc721" => { + // contract_address:token_id + let token_id = row.token_id.split(':').collect::<Vec<&str>>(); + assert!(token_id.len() == 2); + + let token_metadata = Value::Object(ValueMapping::from([ + (Name::new("contract_address"), Value::String(row.contract_address.clone())), + (Name::new("name"), Value::String(row.name)), + (Name::new("symbol"), Value::String(row.symbol)), + (Name::new("token_id"), Value::String(token_id[1].to_string())), + (Name::new("decimals"), Value::String(row.decimals.to_string())), + ])); + + Value::Object(ValueMapping::from([ + (Name::new("balance"), Value::String(row.balance)), + (Name::new("type"), Value::String(row.contract_type)), + (Name::new("token_metadata"), token_metadata), + ])) + } + _ => { + warn!("Unknown contract type: {}", row.contract_type); + continue; + } + }; + + erc_balances.push(balance_value); + } + + Ok(erc_balances) +} + +// TODO: This would be required when subscriptions are needed +// impl ErcBalanceObject { +// pub fn value_mapping(entity: ErcBalance) -> ValueMapping { +// IndexMap::from([ +// ]) +// } +// } + +#[derive(FromRow, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +struct BalanceQueryResultRaw { + pub contract_address: String, + pub name: String, + pub symbol: String, + pub decimals: u8, + pub token_id: String, + pub balance: String, + pub contract_type: String, +}
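For context, a client query against the new resolver might look like the following sketch. The ercBalance field name and the camelCase accountAddress argument come from the constants and resolver above; the metadata field names stay snake_case, as registered in the type mapping:

```rust
// Illustrative request body only; "0x..." stands in for a real account address.
const ERC_BALANCE_QUERY: &str = r#"
query {
  ercBalance(accountAddress: "0x...") {
    balance
    type
    token_metadata { name symbol token_id decimals contract_address }
  }
}
"#;
```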
diff --git a/crates/torii/graphql/src/object/erc/erc_transfer.rs b/crates/torii/graphql/src/object/erc/erc_transfer.rs
new file mode 100644
index 0000000000..056f0c224b
--- /dev/null
+++ b/crates/torii/graphql/src/object/erc/erc_transfer.rs
@@ -0,0 +1,181 @@
+use async_graphql::dynamic::{Field, FieldFuture, InputValue, TypeRef};
+use async_graphql::{Name, Value};
+use convert_case::{Case, Casing};
+use serde::Deserialize;
+use sqlx::{FromRow, Pool, Sqlite, SqliteConnection};
+use starknet_crypto::Felt;
+use torii_core::sql::utils::felt_to_sql_string;
+use tracing::warn;
+
+use crate::constants::{ERC_TRANSFER_NAME, ERC_TRANSFER_TYPE_NAME};
+use crate::mapping::ERC_TRANSFER_TYPE_MAPPING;
+use crate::object::{BasicObject, ResolvableObject};
+use crate::types::{TypeMapping, ValueMapping};
+use crate::utils::extract;
+
+#[derive(Debug)]
+pub struct ErcTransferObject;
+
+impl BasicObject for ErcTransferObject {
+    fn name(&self) -> (&str, &str) {
+        ERC_TRANSFER_NAME
+    }
+
+    fn type_name(&self) -> &str {
+        ERC_TRANSFER_TYPE_NAME
+    }
+
+    fn type_mapping(&self) -> &TypeMapping {
+        &ERC_TRANSFER_TYPE_MAPPING
+    }
+}
+
+impl ResolvableObject for ErcTransferObject {
+    fn resolvers(&self) -> Vec<Field> {
+        let account_address = "account_address";
+        let limit = "limit";
+        let arg_addr = InputValue::new(
+            account_address.to_case(Case::Camel),
+            TypeRef::named_nn(TypeRef::STRING),
+        );
+        let arg_limit =
+            InputValue::new(limit.to_case(Case::Camel), TypeRef::named_nn(TypeRef::INT));
+
+        let field = Field::new(self.name().0, TypeRef::named_list(self.type_name()), move |ctx| {
+            FieldFuture::new(async move {
+                let mut conn = ctx.data::<Pool<Sqlite>>()?.acquire().await?;
+                let address = extract::<Felt>(
+                    ctx.args.as_index_map(),
+                    &account_address.to_case(Case::Camel),
+                )?;
+                let limit = extract::<u64>(ctx.args.as_index_map(), &limit.to_case(Case::Camel))?;
+                let limit: u32 = limit.try_into()?;
+
+                let erc_transfers = fetch_erc_transfers(&mut conn, address, limit).await?;
+
+                Ok(Some(Value::List(erc_transfers)))
+            })
+        })
+        .argument(arg_addr)
+        .argument(arg_limit);
+        vec![field]
+    }
+}
+
+async fn fetch_erc_transfers(
+    conn: &mut SqliteConnection,
+    address: Felt,
+    limit: u32,
+) -> sqlx::Result<Vec<Value>> {
+    let query = format!(
+        r#"
+SELECT
+    et.contract_address,
+    et.from_address,
+    et.to_address,
+    et.amount,
+    et.token_id,
+    et.executed_at,
+    t.name,
+    t.symbol,
+    t.decimals,
+    c.contract_type
+FROM
+    erc_transfers et
+JOIN
+    tokens t ON et.token_id = t.id
+JOIN
+    contracts c ON t.contract_address = c.contract_address
+WHERE
+    et.from_address = ? OR et.to_address = ?
+ORDER BY
+    et.executed_at DESC
+LIMIT {};
+"#,
+        limit
+    );
+
+    let address = felt_to_sql_string(&address);
+    let rows = sqlx::query(&query).bind(&address).bind(&address).fetch_all(conn).await?;
+
+    let mut erc_balances = Vec::new();
+
+    for row in rows {
+        let row = TransferQueryResultRaw::from_row(&row)?;
+
+        let transfer_value = match row.contract_type.as_str() {
+            "ERC20" | "Erc20" | "erc20" => {
+                let token_metadata = Value::Object(ValueMapping::from([
+                    (Name::new("name"), Value::String(row.name)),
+                    (Name::new("symbol"), Value::String(row.symbol)),
+                    // for erc20 there is no token_id
+                    (Name::new("token_id"), Value::Null),
+                    (Name::new("decimals"), Value::String(row.decimals.to_string())),
+                    (Name::new("contract_address"), Value::String(row.contract_address.clone())),
+                ]));
+
+                Value::Object(ValueMapping::from([
+                    (Name::new("from"), Value::String(row.from_address)),
+                    (Name::new("to"), Value::String(row.to_address)),
+                    (Name::new("amount"), Value::String(row.amount)),
+                    (Name::new("type"), Value::String(row.contract_type)),
+                    (Name::new("executed_at"), Value::String(row.executed_at)),
+                    (Name::new("token_metadata"), token_metadata),
+                ]))
+            }
+            "ERC721" | "Erc721" | "erc721" => {
+                // contract_address:token_id
+                let token_id = row.token_id.split(':').collect::<Vec<&str>>();
+                assert!(token_id.len() == 2);
+
+                let token_metadata = Value::Object(ValueMapping::from([
+                    (Name::new("name"), Value::String(row.name)),
+                    (Name::new("symbol"), Value::String(row.symbol)),
+                    (Name::new("token_id"), Value::String(token_id[1].to_string())),
+                    (Name::new("decimals"), Value::String(row.decimals.to_string())),
+                    (Name::new("contract_address"), Value::String(row.contract_address.clone())),
+                ]));
+
+                Value::Object(ValueMapping::from([
+                    (Name::new("from"), Value::String(row.from_address)),
+                    (Name::new("to"), Value::String(row.to_address)),
+                    (Name::new("amount"), Value::String(row.amount)),
+                    (Name::new("type"), Value::String(row.contract_type)),
+                    (Name::new("executed_at"), Value::String(row.executed_at)),
+                    (Name::new("token_metadata"), token_metadata),
+                ]))
+            }
+            _ => {
+                warn!("Unknown contract type: {}", row.contract_type);
+                continue;
+            }
+        };
+
+        erc_balances.push(transfer_value);
+    }
+
+    Ok(erc_balances)
+}
+
+// TODO: This would be required when subscriptions are needed
+// impl ErcTransferObject {
+//     pub fn value_mapping(entity: ErcBalance) -> ValueMapping {
+//         IndexMap::from([
+//         ])
+//     }
+// }
+
+#[derive(FromRow, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+struct TransferQueryResultRaw {
+    pub contract_address: String,
+    pub from_address: String,
+    pub to_address: String,
+    pub token_id: String,
+    pub amount: String,
+    pub executed_at: String,
+    pub name: String,
+    pub symbol: String,
+    pub decimals: u8,
+    pub contract_type: String,
+}
diff --git a/crates/torii/graphql/src/object/erc/mod.rs b/crates/torii/graphql/src/object/erc/mod.rs
new file mode 100644
index 0000000000..eac2c5510b
--- /dev/null
+++ b/crates/torii/graphql/src/object/erc/mod.rs
@@ -0,0 +1,3 @@
+pub mod erc_balance;
+pub mod erc_token;
+pub mod erc_transfer;
diff --git a/crates/torii/graphql/src/object/mod.rs b/crates/torii/graphql/src/object/mod.rs
index c1046ffbe4..8997cdabe3 100644
--- a/crates/torii/graphql/src/object/mod.rs
+++ b/crates/torii/graphql/src/object/mod.rs
@@ -1,5 +1,6 @@
 pub mod connection;
 pub mod entity;
+pub mod erc;
 pub mod event;
 pub mod event_message;
 pub mod inputs;
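One detail worth flagging in `fetch_erc_transfers`: the `LIMIT` is spliced in with `format!` while every other value is bound. That is safe here only because `limit` is a `u32` coming from a typed GraphQL `Int` argument. A sketch of the fully bound variant (not in the PR; SQLite accepts a parameter in `LIMIT`):

```rust
use sqlx::SqliteConnection;

// Sketch: binding LIMIT as a parameter keeps the query string static.
// `address` is already the SQL string form of the felt.
async fn fetch_page(
    conn: &mut SqliteConnection,
    address: &str,
    limit: u32,
) -> sqlx::Result<Vec<sqlx::sqlite::SqliteRow>> {
    sqlx::query(
        "SELECT et.*, t.name, t.symbol, t.decimals, c.contract_type
         FROM erc_transfers et
         JOIN tokens t ON et.token_id = t.id
         JOIN contracts c ON t.contract_address = c.contract_address
         WHERE et.from_address = ? OR et.to_address = ?
         ORDER BY et.executed_at DESC
         LIMIT ?",
    )
    .bind(address)
    .bind(address)
    .bind(limit)
    .fetch_all(conn)
    .await
}
```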
diff --git a/crates/torii/graphql/src/schema.rs b/crates/torii/graphql/src/schema.rs
index 48a915345b..5f70c49908 100644
--- a/crates/torii/graphql/src/schema.rs
+++ b/crates/torii/graphql/src/schema.rs
@@ -10,6 +10,9 @@ use super::object::model_data::ModelDataObject;
 use super::types::ScalarType;
 use super::utils;
 use crate::constants::{QUERY_TYPE_NAME, SUBSCRIPTION_TYPE_NAME};
+use crate::object::erc::erc_balance::ErcBalanceObject;
+use crate::object::erc::erc_token::ErcTokenObject;
+use crate::object::erc::erc_transfer::ErcTransferObject;
 use crate::object::event_message::EventMessageObject;
 use crate::object::metadata::content::ContentObject;
 use crate::object::metadata::social::SocialObject;
@@ -28,6 +31,7 @@ pub async fn build_schema(pool: &SqlitePool) -> Result<Schema> {
     let (objects, unions) = build_objects(pool).await?;
 
     let mut schema_builder = Schema::build(QUERY_TYPE_NAME, None, Some(SUBSCRIPTION_TYPE_NAME));
+    //? Why do we need to provide the QUERY_TYPE_NAME object here when it's already passed to Schema::build?
     let mut query_root = Object::new(QUERY_TYPE_NAME);
     let mut subscription_root = Subscription::new(SUBSCRIPTION_TYPE_NAME);
@@ -112,9 +116,12 @@ async fn build_objects(pool: &SqlitePool) -> Result<(Vec<Box<dyn ObjectTrait>>, Vec<Union>)> {
+        Box::new(ErcBalanceObject),
+        Box::new(ErcTransferObject),
+        Box::new(ErcTokenObject),
diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs
@@ -?,? +?,? @@ pub async fn spinup_types_test(path: &str) -> Result<SqlitePool> {
         .await
         .unwrap();
-    TransactionWaiter::new(transaction_hash, &provider).await?;
+    TransactionWaiter::new(transaction_hash, &account.provider()).await?;
 
     // Execute `delete` and delete Record with id 20
     let InvokeTransactionResult { transaction_hash } = account
@@ -355,29 +353,28 @@ pub async fn spinup_types_test(path: &str) -> Result<SqlitePool> {
     tokio::spawn(async move {
         executor.run().await.unwrap();
     });
-    let db = Sql::new(pool.clone(), strat.world_address, sender).await.unwrap();
+    let db = Sql::new(
+        pool.clone(),
+        sender,
+        &HashMap::from([(strat.world_address, ContractType::WORLD)]),
+    )
+    .await
+    .unwrap();
 
     let (shutdown_tx, _) = broadcast::channel(1);
     let mut engine = Engine::new(
         world,
         db.clone(),
         Arc::clone(&provider),
-        Processors {
-            event: generate_event_processors_map(vec![
-                Arc::new(RegisterModelProcessor),
-                Arc::new(StoreSetRecordProcessor),
-                Arc::new(StoreDelRecordProcessor),
-            ])
-            .unwrap(),
-            ..Processors::default()
-        },
+        Processors { ..Processors::default() },
         EngineConfig::default(),
         shutdown_tx,
         None,
+        Arc::new(HashMap::from([(strat.world_address, ContractType::WORLD)])),
     );
 
     let to = account.provider().block_hash_and_number().await?.block_number;
-    let data = engine.fetch_range(0, to, None).await.unwrap();
+    let data = engine.fetch_range(0, to, &HashMap::new()).await.unwrap();
     engine.process_range(data).await.unwrap();
     db.execute().await.unwrap();
 
     Ok(pool)
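`Sql::new` and `Engine::new` now take a map of indexed contracts instead of the single world address. The tests only register the world; indexing a token would presumably add a second entry (the `ERC20`/`ERC721` variant names are assumed here from the `contract_type` strings used elsewhere in this diff):

```rust
use std::collections::HashMap;

use starknet_crypto::Felt;
use torii_core::types::ContractType;

// Sketch under assumptions: ContractType::ERC20 is taken to exist
// alongside ContractType::WORLD.
fn contracts_for(world: Felt, erc20_token: Felt) -> HashMap<Felt, ContractType> {
    HashMap::from([(world, ContractType::WORLD), (erc20_token, ContractType::ERC20)])
}
```

As in the test setups above, the map is passed by reference to `Sql::new` and `Arc`-wrapped for `Engine::new`, so the fetcher knows which addresses to pull events for.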
diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs
index 2e32e0c194..f35b60fcc6 100644
--- a/crates/torii/graphql/src/tests/subscription_test.rs
+++ b/crates/torii/graphql/src/tests/subscription_test.rs
@@ -1,5 +1,6 @@
 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
     use std::str::FromStr;
     use std::time::Duration;
@@ -14,7 +15,9 @@ mod tests {
     use starknet_crypto::{poseidon_hash_many, Felt};
     use tokio::sync::{broadcast, mpsc};
     use torii_core::executor::Executor;
-    use torii_core::sql::{felts_sql_string, Sql};
+    use torii_core::sql::utils::felts_to_sql_string;
+    use torii_core::sql::Sql;
+    use torii_core::types::ContractType;
 
     use crate::tests::{model_fixtures, run_graphql_subscription};
     use crate::utils;
@@ -28,7 +31,10 @@ mod tests {
         tokio::spawn(async move {
             executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         model_fixtures(&mut db).await;
         // 0. Preprocess expected entity value
@@ -111,7 +117,7 @@ mod tests {
             ],
         });
         let keys = keys_from_ty(&ty).unwrap();
-        let keys_str = felts_sql_string(&keys);
+        let keys_str = felts_to_sql_string(&keys);
         let entity_id = poseidon_hash_many(&keys);
         let model_id = model_id_from_ty(&ty);
@@ -169,7 +175,10 @@ mod tests {
         tokio::spawn(async move {
             executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         model_fixtures(&mut db).await;
 
         // 0. Preprocess expected entity value
@@ -235,7 +244,7 @@ mod tests {
         });
 
         let keys = keys_from_ty(&ty).unwrap();
-        let keys_str = felts_sql_string(&keys);
+        let keys_str = felts_to_sql_string(&keys);
         let entity_id = poseidon_hash_many(&keys);
         let model_id = model_id_from_ty(&ty);
@@ -290,7 +299,10 @@ mod tests {
         tokio::spawn(async move {
             executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         // 0. Preprocess model value
         let namespace = "types_test".to_string();
         let model_name = "Subrecord".to_string();
@@ -361,7 +373,10 @@ mod tests {
         tokio::spawn(async move {
            executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         // 0. Preprocess model value
         let namespace = "types_test".to_string();
         let model_name = "Subrecord".to_string();
@@ -433,7 +448,10 @@ mod tests {
         tokio::spawn(async move {
             executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         let block_timestamp: u64 = 1710754478_u64;
         let (tx, mut rx) = mpsc::channel(7);
         tokio::spawn(async move {
diff --git a/crates/torii/graphql/src/utils.rs b/crates/torii/graphql/src/utils.rs
index 8f49990d4a..949e3b9711 100644
--- a/crates/torii/graphql/src/utils.rs
+++ b/crates/torii/graphql/src/utils.rs
@@ -1,5 +1,8 @@
+use std::str::FromStr;
+
 use async_graphql::{Result, Value};
 use convert_case::{Case, Casing};
+use starknet_crypto::Felt;
 
 use crate::error::ExtractError;
 use crate::types::ValueMapping;
@@ -28,6 +31,18 @@ impl ExtractFromIndexMap for String {
     }
 }
 
+impl ExtractFromIndexMap for Felt {
+    fn extract(indexmap: &ValueMapping, input: &str) -> Result<Self> {
+        let value = indexmap.get(input).ok_or_else(|| ExtractError::NotFound(input.to_string()))?;
+        match value {
+            Value::String(s) => {
+                Ok(Felt::from_str(s).map_err(|_| ExtractError::NotFelt(input.to_string()))?)
+            }
+            _ => Err(ExtractError::NotString(input.to_string())),
+        }
+    }
+}
+
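With this impl in place, a resolver can pull a `Felt` straight out of the GraphQL argument map, with a bad value surfacing as `NotFelt`. Condensed from the ERC resolver call sites (the argument name is illustrative; the crate paths assume torii's graphql crate):

```rust
use async_graphql::Result;
use starknet_crypto::Felt;

use crate::types::ValueMapping;
use crate::utils::extract;

// In the real resolvers the name is derived via to_case(Case::Camel).
fn account_arg(args: &ValueMapping) -> Result<Felt> {
    extract::<Felt>(args, "accountAddress")
}
```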
 impl ExtractFromIndexMap for Vec<String> {
     fn extract(indexmap: &ValueMapping, input: &str) -> Result<Self> {
         let value = indexmap.get(input).ok_or_else(|| ExtractError::NotFound(input.to_string()))?;
diff --git a/crates/torii/grpc/src/server/mod.rs b/crates/torii/grpc/src/server/mod.rs
index 6a496cc368..07d9607310 100644
--- a/crates/torii/grpc/src/server/mod.rs
+++ b/crates/torii/grpc/src/server/mod.rs
@@ -35,9 +35,9 @@ use tokio::sync::mpsc::Receiver;
 use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream};
 use tonic::transport::Server;
 use tonic::{Request, Response, Status};
-use torii_core::cache::ModelCache;
 use torii_core::error::{Error, ParseError, QueryError};
 use torii_core::model::{build_sql_query, map_row_to_ty};
+use torii_core::sql::cache::ModelCache;
 
 use self::subscriptions::entity::EntityManager;
 use self::subscriptions::event_message::EventMessageManager;
diff --git a/crates/torii/grpc/src/server/subscriptions/indexer.rs b/crates/torii/grpc/src/server/subscriptions/indexer.rs
index 27315b6766..e328be6bbb 100644
--- a/crates/torii/grpc/src/server/subscriptions/indexer.rs
+++ b/crates/torii/grpc/src/server/subscriptions/indexer.rs
@@ -13,7 +13,7 @@ use tokio::sync::mpsc::{channel, Receiver, Sender};
 use tokio::sync::RwLock;
 use torii_core::error::{Error, ParseError};
 use torii_core::simple_broker::SimpleBroker;
-use torii_core::types::Contract as ContractUpdated;
+use torii_core::types::ContractCursor as ContractUpdated;
 use tracing::{error, trace};
 
 use crate::proto;
diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs
index d8b7b759d2..98388f466a 100644
--- a/crates/torii/grpc/src/server/tests/entities_test.rs
+++ b/crates/torii/grpc/src/server/tests/entities_test.rs
@@ -1,3 +1,4 @@
+use std::collections::HashMap;
 use std::str::FromStr;
 use std::sync::Arc;
@@ -22,10 +23,8 @@
 use tempfile::NamedTempFile;
 use tokio::sync::broadcast;
 use torii_core::engine::{Engine, EngineConfig, Processors};
 use torii_core::executor::Executor;
-use torii_core::processors::generate_event_processors_map;
-use torii_core::processors::register_model::RegisterModelProcessor;
-use torii_core::processors::store_set_record::StoreSetRecordProcessor;
 use torii_core::sql::Sql;
+use torii_core::types::ContractType;
 
 use crate::proto::types::KeysClause;
 use crate::server::DojoWorld;
@@ -105,28 +104,28 @@ async fn test_entities_queries(sequencer: &RunnerCtx) {
     tokio::spawn(async move {
         executor.run().await.unwrap();
     });
-    let db = Sql::new(pool.clone(), strat.world_address, sender).await.unwrap();
+    let db = Sql::new(
+        pool.clone(),
+        sender,
+        &HashMap::from([(strat.world_address, ContractType::WORLD)]),
+    )
+    .await
+    .unwrap();
 
     let (shutdown_tx, _) = broadcast::channel(1);
     let mut engine = Engine::new(
         world_reader,
         db.clone(),
         Arc::clone(&provider),
-        Processors {
-            event: generate_event_processors_map(vec![
-                Arc::new(RegisterModelProcessor),
-                Arc::new(StoreSetRecordProcessor),
-            ])
-            .unwrap(),
-            ..Processors::default()
-        },
+        Processors { ..Processors::default() },
         EngineConfig::default(),
         shutdown_tx,
         None,
+        Arc::new(HashMap::from([(strat.world_address, ContractType::WORLD)])),
     );
 
     let to = provider.block_hash_and_number().await.unwrap().block_number;
-    let data = engine.fetch_range(0, to, None).await.unwrap();
+    let data = engine.fetch_range(0, to, &HashMap::new()).await.unwrap();
     engine.process_range(data).await.unwrap();
     db.execute().await.unwrap();
diff --git a/crates/torii/libp2p/src/server/mod.rs b/crates/torii/libp2p/src/server/mod.rs
index 11f0489687..5682c8ac54 100644
--- a/crates/torii/libp2p/src/server/mod.rs
+++ b/crates/torii/libp2p/src/server/mod.rs
@@ -26,7 +26,8 @@ use starknet::core::utils::get_selector_from_name;
 use starknet::providers::Provider;
 use starknet_crypto::poseidon_hash_many;
 use torii_core::executor::QueryMessage;
-use torii_core::sql::{felts_sql_string, Sql};
+use torii_core::sql::utils::felts_to_sql_string;
+use torii_core::sql::Sql;
 use tracing::{info, warn};
 use webrtc::tokio::Certificate;
@@ -246,7 +247,7 @@ impl<P: Provider + Sync> Relay<P>
                     {
                         continue;
                     }
                 };
-                let keys_str = felts_sql_string(&keys);
+                let keys_str = felts_to_sql_string(&keys);
                 let entity_id = poseidon_hash_many(&keys);
                 let model_id = ty_model_id(&ty).unwrap();
diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs
index 069f82997b..dcc3af889f 100644
--- a/crates/torii/libp2p/src/tests.rs
+++ b/crates/torii/libp2p/src/tests.rs
@@ -524,6 +524,7 @@ mod test {
     #[cfg(not(target_arch = "wasm32"))]
     #[tokio::test]
     async fn test_client_messaging() -> Result<(), Box<dyn Error>> {
+        use std::collections::HashMap;
         use std::time::Duration;
 
         use dojo_types::schema::{Member, Struct, Ty};
@@ -540,6 +541,7 @@ mod test {
         use tokio::time::sleep;
         use torii_core::executor::Executor;
         use torii_core::sql::Sql;
+        use torii_core::types::ContractType;
 
         use crate::server::Relay;
         use crate::typed_data::{Domain, Field, SimpleField, TypedData};
@@ -576,7 +578,10 @@ mod test {
         tokio::spawn(async move {
             executor.run().await.unwrap();
         });
-        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap();
+        let mut db =
+            Sql::new(pool.clone(), sender, &HashMap::from([(Felt::ZERO, ContractType::WORLD)]))
+                .await
+                .unwrap();
 
         // Register the model of our Message
         db.register_model(
diff --git a/crates/torii/migrations/20240913104418_add_erc.sql b/crates/torii/migrations/20240913104418_add_erc.sql
new file mode 100644
index 0000000000..4366acac27
--- /dev/null
+++ b/crates/torii/migrations/20240913104418_add_erc.sql
@@ -0,0 +1,35 @@
+CREATE TABLE balances (
+    -- account_address:token_id
+    id TEXT NOT NULL PRIMARY KEY,
+    balance TEXT NOT NULL,
+    account_address TEXT NOT NULL,
+    contract_address TEXT NOT NULL,
+    -- contract_address:token_id
+    token_id TEXT NOT NULL,
+    FOREIGN KEY (token_id) REFERENCES tokens(id)
+);
+
+CREATE INDEX balances_account_address ON balances (account_address);
+CREATE INDEX balances_contract_address ON balances (contract_address);
+
+CREATE TABLE tokens (
+    -- contract_address:token_id
+    id TEXT NOT NULL PRIMARY KEY,
+    contract_address TEXT NOT NULL,
+    name TEXT NOT NULL,
+    symbol TEXT NOT NULL,
+    decimals INTEGER NOT NULL,
+    FOREIGN KEY (contract_address) REFERENCES contracts(id)
+);
+
+CREATE TABLE erc_transfers (
+    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    contract_address TEXT NOT NULL,
+    from_address TEXT NOT NULL,
+    to_address TEXT NOT NULL,
+    amount TEXT NOT NULL,
+    -- contract_address:token_id
+    token_id TEXT NOT NULL,
+    executed_at DATETIME NOT NULL,
+    FOREIGN KEY (token_id) REFERENCES tokens(id)
+);
diff --git a/crates/torii/migrations/20240918200125_rename_column_contracts_table.sql b/crates/torii/migrations/20240918200125_rename_column_contracts_table.sql
new file mode 100644
index 0000000000..3213853e8a
--- /dev/null
+++ b/crates/torii/migrations/20240918200125_rename_column_contracts_table.sql
@@ -0,0 +1 @@
+ALTER TABLE contracts RENAME COLUMN last_pending_block_world_tx TO last_pending_block_contract_tx;
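The comments in the migration carry the id scheme: `tokens.id` is `contract_address:token_id`, and `balances.id` prefixes the holder's address onto that. Restated as code (hypothetical helpers, not in the PR; they only spell out what the comments document):

```rust
use starknet_crypto::Felt;
use torii_core::sql::utils::felt_to_sql_string;

// tokens.id, also referenced by balances.token_id and erc_transfers.token_id.
fn token_db_id(contract_address: &Felt, token_id: &str) -> String {
    format!("{}:{}", felt_to_sql_string(contract_address), token_id)
}

// balances.id: the holder's address prefixed onto the token's db id.
fn balance_db_id(account_address: &Felt, token_db_id: &str) -> String {
    format!("{}:{}", felt_to_sql_string(account_address), token_db_id)
}
```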
diff --git a/scripts/compare-torii-data.py b/scripts/compare-torii-data.py
new file mode 100755
index 0000000000..c792803eae
--- /dev/null
+++ b/scripts/compare-torii-data.py
@@ -0,0 +1,103 @@
+# This script compares data across the 'events', 'entities', 'transactions', 'balances', 'tokens', and 'erc_transfers' tables between two SQLite databases.
+# Helpful to make sure any changes made in torii don't affect the resulting data.
+
+import sqlite3
+import argparse
+
+def fetch_table_data(db_path, table_name, columns):
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+    cursor.execute(f"SELECT {', '.join(columns)} FROM {table_name}")
+    data = cursor.fetchall()
+    conn.close()
+    if table_name == "erc_transfers":
+        # Use a set of tuples for row-wise comparison since there's no unique ID
+        return set(tuple(row) for row in data)
+    else:
+        # Use the first column as the key for other tables
+        return {row[0]: row[1:] for row in data}
+
+def get_table_row_count(db_path, table_name):
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+    cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
+    count = cursor.fetchone()[0]
+    conn.close()
+    return count
+
+def compare_data(data1, data2, table_name):
+    if table_name == "erc_transfers":
+        differences = data1.symmetric_difference(data2)
+        if differences:
+            print(f"Differences found in {table_name} table:")
+            for row in differences:
+                print(f"  {row}")
+        else:
+            print(f"No differences found in {table_name}")
+    else:
+        differences_found = False
+        for id, values in data1.items():
+            if id in data2:
+                if values != data2[id]:
+                    print(f"Mismatch found in {table_name} for ID {id}:")
+                    print(f"  Database 1: {values}")
+                    print(f"  Database 2: {data2[id]}")
+                    differences_found = True
+            else:
+                print(f"ID {id} found in {table_name} of Database 1 but not in Database 2")
+                differences_found = True
+
+        for id in data2:
+            if id not in data1:
+                print(f"ID {id} found in {table_name} of Database 2 but not in Database 1")
+                differences_found = True
+
+        if not differences_found:
+            print(f"No differences found in {table_name}")
+
+def table_exists(db_path, table_name):
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+    cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}'")
+    exists = cursor.fetchone() is not None
+    conn.close()
+    return exists
+
+def compare_databases(db_path1, db_path2):
+    # Columns to compare, ignoring time-dependent and event_id columns
+    table_columns = {
+        "events": ["id", "keys", "data", "transaction_hash"],
+        "entities": ["id", "keys"],
+        "transactions": ["id", "transaction_hash", "sender_address", "calldata", "max_fee", "signature", "nonce", "transaction_type"],
+        "balances": ["id", "balance", "account_address", "contract_address", "token_id"],
+        "tokens": ["id", "contract_address", "name", "symbol", "decimals"],
+        "erc_transfers": ["contract_address", "from_address", "to_address", "amount", "token_id"]
+    }
+
+    for table_name, columns in table_columns.items():
+        if table_exists(db_path1, table_name) and table_exists(db_path2, table_name):
+            print(f"\nComparing {table_name} table:")
+
+            # Fetch data from both databases
+            data_db1 = fetch_table_data(db_path1, table_name, columns)
+            data_db2 = fetch_table_data(db_path2, table_name, columns)
+
+            # Get row counts from both databases
+            count_db1 = get_table_row_count(db_path1, table_name)
+            count_db2 = get_table_row_count(db_path2, table_name)
+
+            # Print row counts
+            print(f"Number of rows in {table_name} table: Database 1 = {count_db1}, Database 2 = {count_db2}")
+
+            # Compare data
+            compare_data(data_db1, data_db2, table_name)
+        else:
+            print(f"\nSkipping {table_name} table as it doesn't exist in one or both databases.")
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Compare data in events, entities, transactions, balances, tokens, and erc_transfers tables between two SQLite databases.")
+    parser.add_argument("db_path1", help="Path to the first SQLite database")
+    parser.add_argument("db_path2", help="Path to the second SQLite database")
+    args = parser.parse_args()
+
+    compare_databases(args.db_path1, args.db_path2)
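The script takes the two database files as positional arguments, e.g. `python3 scripts/compare-torii-data.py torii_before.db torii_after.db`. Since time-dependent columns and event_id are deliberately left out of the column lists, two indexing runs over the same chain should compare clean.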
diff --git a/scripts/deploy_erc20_katana.sh b/scripts/deploy_erc20_katana.sh
new file mode 100755
index 0000000000..3ad8d87937
--- /dev/null
+++ b/scripts/deploy_erc20_katana.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+starkli deploy --account ../account.json --keystore ../signer.json --keystore-password "" 0x02a8846878b6ad1f54f6ba46f5f40e11cee755c677f130b2c4b60566c9003f1f 0x626c6f62 0x424c42 0x8 u256:10000000000 0xb3ff441a68610b30fd5e2abbf3a1548eb6ba6f3559f2862bf2dc757e5828ca --rpc http://localhost:5050
diff --git a/scripts/send_erc20_transfer.sh b/scripts/send_erc20_transfer.sh
new file mode 100755
index 0000000000..b321d2fa19
--- /dev/null
+++ b/scripts/send_erc20_transfer.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [ $# -eq 0 ]; then
+    echo "Error: Contract address argument is required."
+    echo "Usage: $0 <contract_address>"
+    exit 1
+fi
+
+contract_address=$1
+rpc="http://localhost:5050"
+
+starkli invoke $contract_address transfer 0x1234 u256:1 --account ../account.json --keystore ../signer.json --keystore-password "" --rpc $rpc
diff --git a/scripts/verify_db_balances/Cargo.toml b/scripts/verify_db_balances/Cargo.toml
new file mode 100644
index 0000000000..d7d9c03e0a
--- /dev/null
+++ b/scripts/verify_db_balances/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+edition.workspace = true
+license.workspace = true
+name = "verify_db_balances"
+repository.workspace = true
+version.workspace = true
+
+[dependencies]
+num-traits = "0.2.19"
+sqlx.workspace = true
+starknet.workspace = true
+tokio = { workspace = true, features = [ "full" ] }
+tracing-subscriber.workspace = true
+tracing.workspace = true
+
+clap.workspace = true
diff --git a/scripts/verify_db_balances/src/main.rs b/scripts/verify_db_balances/src/main.rs
new file mode 100644
index 0000000000..9c72b97f02
--- /dev/null
+++ b/scripts/verify_db_balances/src/main.rs
@@ -0,0 +1,157 @@
+use std::str::FromStr;
+use std::sync::Arc;
+
+use clap::Parser;
+use num_traits::ToPrimitive;
+use sqlx::sqlite::SqlitePool;
+use sqlx::Row;
+use starknet::core::types::{BlockId, Felt, FunctionCall, U256};
+use starknet::macros::selector;
+use starknet::providers::jsonrpc::HttpTransport;
+use starknet::providers::{JsonRpcClient, Provider, Url};
+use tracing::{error, info, Level};
+
+async fn get_balance_from_starknet(
+    account_address: &str,
+    contract_address: &str,
+    contract_type: &str,
+    token_id: &str,
+    provider: Arc<JsonRpcClient<HttpTransport>>,
+) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
+    let account_address = Felt::from_str(account_address).unwrap();
+    let contract_address = Felt::from_str(contract_address).unwrap();
+
+    let balance = match contract_type {
+        "ERC20" => {
+            let balance = provider
+                .call(
+                    FunctionCall {
+                        contract_address,
+                        entry_point_selector: selector!("balanceOf"),
+                        calldata: vec![account_address],
+                    },
+                    BlockId::Tag(starknet::core::types::BlockTag::Pending),
+                )
+                .await?;
+
+            let balance_low = balance[0].to_u128().unwrap();
+            let balance_high = balance[1].to_u128().unwrap();
+
+            let balance = U256::from_words(balance_low, balance_high);
+            format!("{:#064x}", balance)
+        }
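For ERC20, `balanceOf` returns a Cairo u256 as two felts, low word first, which `from_words` reassembles before hex-formatting; for ERC721 the `ownerOf` result is collapsed to a 0-or-1 balance below. The padding convention is worth pinning down (self-contained check, values illustrative):

```rust
use starknet::core::types::U256;

fn main() {
    // With {:#064x} the width counts the "0x" prefix, so the stored form is
    // always 64 characters: "0x" plus 62 zero-padded hex digits.
    let balance = U256::from_words(1u128, 0u128);
    let s = format!("{:#064x}", balance);
    assert_eq!(s.len(), 64);
    assert!(s.starts_with("0x") && s.ends_with('1'));
}
```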
+        "ERC721" => {
+            let token_id = Felt::from_str(token_id.split(":").nth(1).unwrap()).unwrap();
+            let balance = provider
+                .call(
+                    FunctionCall {
+                        contract_address,
+                        entry_point_selector: selector!("ownerOf"),
+                        // HACK: assumes token_id.high == 0
+                        calldata: vec![token_id, Felt::ZERO],
+                    },
+                    BlockId::Tag(starknet::core::types::BlockTag::Pending),
+                )
+                .await?;
+            if account_address != balance[0] {
+                format!("{:#064x}", U256::from(0u8))
+            } else {
+                format!("{:#064x}", U256::from(1u8))
+            }
+        }
+        _ => unreachable!(),
+    };
+    Ok(balance)
+}
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path to the SQLite database file
+    #[arg(short, long)]
+    db_path: String,
+
+    /// RPC URL for the Starknet provider
+    #[arg(short, long)]
+    rpc_url: String,
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Initialize the logger
+    tracing_subscriber::fmt().with_max_level(Level::INFO).init();
+
+    // Parse command line arguments
+    let args = Args::parse();
+
+    // Use the provided database path
+    let pool = SqlitePool::connect(&format!("sqlite:{}", args.db_path)).await?;
+
+    let rows = sqlx::query(
+        "
+        SELECT b.account_address, b.contract_address, b.balance, c.contract_type, b.token_id
+        FROM balances b
+        JOIN contracts c ON b.contract_address = c.contract_address
+        ",
+    )
+    .fetch_all(&pool)
+    .await?;
+
+    // Create a semaphore to limit concurrent tasks
+    let semaphore = std::sync::Arc::new(tokio::sync::Semaphore::new(10)); // Adjust the number as needed
+
+    let mut handles = Vec::new();
+
+    // print number of balances
+    info!("Checking {} balances", rows.len());
+
+    let provider =
+        Arc::new(JsonRpcClient::new(HttpTransport::new(Url::parse(&args.rpc_url).unwrap())));
+
+    // IMPROVEMENT: batch multiple balanceOf calls in same rpc call
+    for row in rows {
+        let account_address: String = row.get("account_address");
+        let contract_address: String = row.get("contract_address");
+        let db_balance: String = row.get("balance");
+        let contract_type: String = row.get("contract_type");
+        let token_id: String = row.get("token_id");
+        let semaphore_clone = semaphore.clone();
+        let provider = provider.clone();
+
+        let handle = tokio::spawn(async move {
+            let _permit = semaphore_clone.acquire().await.unwrap();
+            let starknet_balance = get_balance_from_starknet(
+                &account_address,
+                &contract_address,
+                &contract_type,
+                &token_id,
+                provider,
+            )
+            .await?;
+
+            if db_balance != starknet_balance {
+                error!(
+                    "Mismatch for account {} and contract {}: DB balance = {}, Starknet balance = \
+                     {}",
+                    account_address, contract_address, db_balance, starknet_balance
+                );
+            } else {
+                info!(
+                    "Balance matched for account {} and contract {}",
+                    account_address, contract_address
+                );
+            }
+            Ok::<(), Box<dyn std::error::Error + Send + Sync>>(())
+        });
+
+        handles.push(handle);
+    }
+
+    // Wait for all tasks to complete
+    for handle in handles {
+        handle.await??;
+    }
+
+    info!("Checked all balances");
+    Ok(())
+}
diff --git a/spawn-and-move-db.tar.gz b/spawn-and-move-db.tar.gz
index c55d890bc2..f6d7914732 100644
Binary files a/spawn-and-move-db.tar.gz and b/spawn-and-move-db.tar.gz differ
diff --git a/types-test-db.tar.gz b/types-test-db.tar.gz
index 4caaae7f17..0929d93679 100644
Binary files a/types-test-db.tar.gz and b/types-test-db.tar.gz differ