From 39ae619fc53111bf0d52a56cf864a0f366438565 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 15 Dec 2025 11:19:14 -0300 Subject: [PATCH 01/71] feat: add task delay tests --- coordinator/cosign/Cargo.toml | 6 ++ coordinator/cosign/src/delay.rs | 18 +++-- coordinator/cosign/src/lib.rs | 4 + coordinator/cosign/src/tests/delay.rs | 111 ++++++++++++++++++++++++++ coordinator/cosign/src/tests/mod.rs | 2 + 5 files changed, 136 insertions(+), 5 deletions(-) create mode 100644 coordinator/cosign/src/tests/delay.rs create mode 100644 coordinator/cosign/src/tests/mod.rs diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 63c50ebc1..0f6470be5 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -31,3 +31,9 @@ serai-db = { path = "../../common/db", version = "0.1.1" } serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } + +[dev-dependencies] +tokio = { version = "1", default-features = false, features = ["time", "test-util"] } + +[features] +tests = [] diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 3439135b4..f0789a80e 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -9,7 +9,7 @@ use crate::evaluator::CosignedBlocks; /// How often callers should broadcast the cosigns flagged for rebroadcasting. pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60); const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); -const ACKNOWLEDGEMENT_DELAY: Duration = +pub(crate) const ACKNOWLEDGEMENT_DELAY: Duration = Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs()); create_db!( @@ -37,12 +37,20 @@ impl ContinuallyRan for CosignDelayTask { let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { break; }; + + // If we've already acknowledged a later block, don't regress (and don't wait). + let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); + if block_number <= already_cosigned { + txn.commit(); + made_progress = true; + continue; + } + // Calculate when we should mark it as valid - let time_valid = - SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; + let time_valid = Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; + // Sleep until then - tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO)) - .await; + tokio::time::sleep(time_valid).await; // Set the cosigned block LatestCosignedBlockNumber::set(&mut txn, &block_number); diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 781e0c276..95d26c65e 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -37,6 +37,10 @@ mod delay; pub use delay::BROADCAST_FREQUENCY; use delay::LatestCosignedBlockNumber; +#[cfg(any(test, feature = "tests"))] +/// Test helpers and fixtures. +pub mod tests; + /// A 'global session', defined as all validator sets used for cosigning at a given moment. /// /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs new file mode 100644 index 000000000..2f2928b5b --- /dev/null +++ b/coordinator/cosign/src/tests/delay.rs @@ -0,0 +1,111 @@ +use crate::LatestCosignedBlockNumber; +use crate::delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask}; +use std::{ + sync::OnceLock, + time::{Duration, SystemTime}, +}; + +use serai_db::*; +use serai_task::ContinuallyRan; + +use crate::evaluator::CosignedBlocks; + +use serai_db::{Db as _, MemDb}; + +#[tokio::test] +async fn delay_task_returns_false_with_no_messages() { + let db = MemDb::new(); + let mut task = CosignDelayTask { db }; + assert_eq!(task.run_iteration().await.unwrap(), false); +} + +#[tokio::test] +async fn delay_task_updates_latest_cosigned_block_number() { + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + CosignedBlocks::send(&mut txn, &(7u64, 0u64)); + txn.commit(); + } + + let mut task = CosignDelayTask { db: db.clone() }; + assert_eq!(task.run_iteration().await.unwrap(), true); + assert_eq!(LatestCosignedBlockNumber::get(&db), Some(7u64)); +} + +#[tokio::test] +async fn delay_task_drains_multiple_messages_in_one_iteration() { + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, 0u64)); + CosignedBlocks::send(&mut txn, &(2u64, 0u64)); + CosignedBlocks::send(&mut txn, &(3u64, 0u64)); + txn.commit(); + } + + let mut task = CosignDelayTask { db: db.clone() }; + assert_eq!(task.run_iteration().await.unwrap(), true); + assert_eq!(LatestCosignedBlockNumber::get(&db), Some(3u64)); + assert!(CosignedBlocks::peek(&db).is_none()); +} + +#[tokio::test(start_paused = true)] +async fn delay_task_does_not_regress_and_skips_wait_for_stale_messages() { + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + txn.commit(); + } + + let now_secs = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs(); + + { + let mut txn = db.txn(); + CosignedBlocks::send(&mut txn, &(9u64, now_secs)); + txn.commit(); + } + + let mut task = CosignDelayTask { db: db.clone() }; + let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); + tokio::task::yield_now().await; + + assert_eq!(handle.await.unwrap(), true); + assert_eq!(LatestCosignedBlockNumber::get(&db), Some(10u64)); + assert!(CosignedBlocks::peek(&db).is_none()); +} + +#[tokio::test(start_paused = true)] +async fn delay_task_does_not_ack_before_acknowledgement_delay() { + let mut db = MemDb::new(); + + let now_secs = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs(); + + { + let mut txn = db.txn(); + CosignedBlocks::send(&mut txn, &(7u64, now_secs)); + txn.commit(); + } + + let mut task = CosignDelayTask { db: db.clone() }; + let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); + + tokio::task::yield_now().await; + assert_eq!(LatestCosignedBlockNumber::get(&db), None); + + tokio::time::advance(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(2)).await; + tokio::task::yield_now().await; + assert_eq!(LatestCosignedBlockNumber::get(&db), None); + + tokio::time::advance(Duration::from_secs(4)).await; + tokio::task::yield_now().await; + + assert_eq!(handle.await.unwrap(), true); + assert_eq!(LatestCosignedBlockNumber::get(&db), Some(7u64)); +} diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs new file mode 100644 index 000000000..cabb3e42d --- /dev/null +++ b/coordinator/cosign/src/tests/mod.rs @@ -0,0 +1,2 @@ +#[cfg(test)] +mod delay; From 28548b2f3a1de52e586e1563afc9002f12d82b4e Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 19 Dec 2025 18:41:42 -0300 Subject: [PATCH 02/71] feat: add intend task tests --- coordinator/cosign/Cargo.toml | 8 +- coordinator/cosign/src/intend.rs | 112 ++-- coordinator/cosign/src/lib.rs | 78 ++- coordinator/cosign/src/tests/intend.rs | 699 +++++++++++++++++++++++++ coordinator/cosign/src/tests/mod.rs | 237 +++++++++ substrate/client/serai/src/lib.rs | 16 + 6 files changed, 1073 insertions(+), 77 deletions(-) create mode 100644 coordinator/cosign/src/tests/intend.rs diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 0f6470be5..a23ae6e90 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -31,9 +31,15 @@ serai-db = { path = "../../common/db", version = "0.1.1" } serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } +schnorrkel = { version = "0.11", default-features = false, features = ["std"], optional = true } +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"], optional = true } [dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } +serai-substrate-tests = { path = "../../tests/substrate" } +k256 = { version = "0.13", default-features = false, features = ["std", "ecdsa"] } tokio = { version = "1", default-features = false, features = ["time", "test-util"] } [features] -tests = [] +tests = ["schnorrkel", "rand_core"] diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index dbe5bbfac..1b7d4f77a 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -13,7 +13,7 @@ use serai_client_serai::{ address::SeraiAddress, merkle::IncrementalUnbalancedMerkleTree, }, - validator_sets::Event, + validator_sets, Event, }, Serai, Events, }; @@ -55,15 +55,15 @@ db_channel! { } async fn block_has_events_justifying_a_cosign( - serai: &Serai, + serai: &impl SeraiRpc, block_number: u64, ) -> Result<(Block, Events, HasEvents), String> { let block = serai .block_by_number(block_number) .await - .map_err(|e| format!("{e:?}"))? + .unwrap() .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?; - let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?; + let events = serai.events(block.header.hash()).await?; if events.validator_sets().set_keys_events().next().is_some() { return Ok((block, events, HasEvents::Notable)); @@ -92,34 +92,31 @@ fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amoun } /// A task to determine which blocks we should intend to cosign. -pub(crate) struct CosignIntendTask { +pub(crate) struct CosignIntendTask { pub(crate) db: D, - pub(crate) serai: Arc, + pub(crate) serai: S, } -impl ContinuallyRan for CosignIntendTask { +impl ContinuallyRan for CosignIntendTask { type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { async move { let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = - self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?; + let latest_block_number = self.serai.latest_finalized_block_number().await?; - for block_number in start_block_number ..= latest_block_number { + for block_number in start_block_number..=latest_block_number { let mut txn = self.db.txn(); let (block, events, mut has_events) = - block_has_events_justifying_a_cosign(&self.serai, block_number) - .await - .map_err(|e| format!("{e:?}"))?; + block_has_events_justifying_a_cosign(&self.serai, block_number).await?; let mut builds_upon = BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new()); // Check we are indexing a linear chain - if block.header.builds_upon() != - builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG) + if block.header.builds_upon() + != builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG) { Err(format!( "node's block #{block_number} doesn't build upon the block #{} prior indexed", @@ -138,53 +135,46 @@ impl ContinuallyRan for CosignIntendTask { BuildsUpon::set(&mut txn, &builds_upon); // Update the stakes - for event in events.validator_sets().allocation_events() { - let Event::Allocation { validator, network, amount } = event else { - panic!("event from `allocation_events` wasn't `Event::Allocation`") - }; - let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0)); - } - for event in events.validator_sets().deallocation_events() { - let Event::Deallocation { validator, network, amount, timeline: _ } = event else { - panic!("event from `deallocation_events` wasn't `Event::Deallocation`") - }; - let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); - } - - // Handle decided sets - for event in events.validator_sets().set_decided_events() { - let Event::SetDecided { set, validators } = event else { - panic!("event from `set_decided_events` wasn't `Event::SetDecided`") - }; - - let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; - Validators::set( - &mut txn, - set, - &validators.iter().map(|(validator, _key_shares)| *validator).collect(), - ); - } - - // Handle declarations of the latest set - for event in events.validator_sets().set_keys_events() { - let Event::SetKeys { set, key_pair } = event else { - panic!("event from `set_keys_events` wasn't `Event::SetKeys`") - }; - let mut stake = 0; - for validator in - Validators::take(&mut txn, *set).expect("set which wasn't decided set keys") - { - stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0; + for tx_events in events.events() { + for event in tx_events { + match event { + Event::ValidatorSets(event) => match event { + validator_sets::Event::Allocation { validator, network, amount } => { + let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; + let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); + Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0)); + } + validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { + let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; + let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); + Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); + } + validator_sets::Event::SetDecided { set, validators } => { + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + Validators::set( + &mut txn, + set, + &validators.iter().map(|(validator, _key_shares)| *validator).collect(), + ); + } + validator_sets::Event::SetKeys { set, key_pair } => { + let mut stake = 0; + for validator in + Validators::take(&mut txn, *set).expect("set which wasn't decided set keys") + { + stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0; + } + LatestSet::set( + &mut txn, + set.network, + &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, + ); + } + _ => continue, + }, + _ => continue, + } } - LatestSet::set( - &mut txn, - set.network, - &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, - ); } let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 95d26c65e..2b2b9f0f8 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -20,7 +20,7 @@ use serai_client_serai::{ }, Block, }, - Serai, State, + Events, Serai, State, }; use serai_db::*; @@ -41,6 +41,44 @@ use delay::LatestCosignedBlockNumber; /// Test helpers and fixtures. pub mod tests; +/// Abstraction over the Serai RPC client so tests can inject custom behaviour. +pub trait SeraiRpc: Clone + Send + Sync + 'static { + /// Return the latest finalized block number. + fn latest_finalized_block_number(&self) -> impl Send + Future>; + + /// Fetch a block by its number. + fn block_by_number( + &self, + block: u64, + ) -> impl Send + Future, String>>; + + /// Fetch all events associated with the provided block hash. + fn events(&self, block: BlockHash) -> impl Send + Future>; +} + +impl SeraiRpc for Arc { + fn latest_finalized_block_number(&self) -> impl Send + Future> { + let serai = self.clone(); + async move { serai.as_ref().latest_finalized_block_number().await.map_err(|e| format!("{e:?}")) } + } + + fn block_by_number( + &self, + block: u64, + ) -> impl Send + Future, String>> { + let serai = self.clone(); + async move { serai.as_ref().block_by_number(block).await.map_err(|e| format!("{e:?}")) } + } + + fn events(&self, block: BlockHash) -> impl Send + Future> { + let serai = self.clone(); + async move { + let events = serai.as_ref().events(block).await.map_err(|e| format!("{e:?}"))?; + Ok(events) + } + } +} + /// A 'global session', defined as all validator sets used for cosigning at a given moment. /// /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign @@ -166,14 +204,14 @@ impl IntakeCosignError { /// If this error is temporal to the local view pub fn temporal(&self) -> bool { match self { - IntakeCosignError::NotYetIndexedBlock | - IntakeCosignError::StaleCosign | - IntakeCosignError::UnrecognizedGlobalSession | - IntakeCosignError::FutureGlobalSession => true, - IntakeCosignError::BeforeGlobalSessionStart | - IntakeCosignError::AfterGlobalSessionEnd | - IntakeCosignError::NonParticipatingNetwork | - IntakeCosignError::InvalidSignature => false, + IntakeCosignError::NotYetIndexedBlock + | IntakeCosignError::StaleCosign + | IntakeCosignError::UnrecognizedGlobalSession + | IntakeCosignError::FutureGlobalSession => true, + IntakeCosignError::BeforeGlobalSessionStart + | IntakeCosignError::AfterGlobalSessionEnd + | IntakeCosignError::NonParticipatingNetwork + | IntakeCosignError::InvalidSignature => false, } } } @@ -181,24 +219,34 @@ impl IntakeCosignError { /// The interface to manage cosigning with. pub struct Cosigning { db: D, + // The task system stops a task once all its handles are dropped. Keep these alive for as long as + // this cosigning service should run. + _task_handles: Vec, } impl Cosigning { + /// Create a cosigning handle using an already-initialized database. + /// + /// This does not spawn any background tasks; use `Cosigning::spawn` for the full service. + pub fn new(db: D) -> Self { + Self { db, _task_handles: vec![] } + } + /// Spawn the tasks to intend and evaluate cosigns. /// /// The database specified must only be used with a singular instance of the Serai network, and /// only used once at any given time. - pub fn spawn( + pub fn spawn( db: D, - serai: Arc, + serai: S, request: R, tasks_to_run_upon_cosigning: Vec, ) -> Self { - let (intend_task, _intend_task_handle) = Task::new(); + let (intend_task, intend_task_handle) = Task::new(); let (evaluator_task, evaluator_task_handle) = Task::new(); let (delay_task, delay_task_handle) = Task::new(); tokio::spawn( (intend::CosignIntendTask { db: db.clone(), serai }) - .continually_run(intend_task, vec![evaluator_task_handle]), + .continually_run(intend_task, vec![evaluator_task_handle.clone()]), ); tokio::spawn( (evaluator::CosignEvaluatorTask { @@ -206,13 +254,13 @@ impl Cosigning { request, last_request_for_cosigns: Instant::now(), }) - .continually_run(evaluator_task, vec![delay_task_handle]), + .continually_run(evaluator_task, vec![delay_task_handle.clone()]), ); tokio::spawn( (delay::CosignDelayTask { db: db.clone() }) .continually_run(delay_task, tasks_to_run_upon_cosigning), ); - Self { db } + Self { db, _task_handles: vec![intend_task_handle, evaluator_task_handle, delay_task_handle] } } /// The latest cosigned block number. diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs new file mode 100644 index 000000000..56dce680c --- /dev/null +++ b/coordinator/cosign/src/tests/intend.rs @@ -0,0 +1,699 @@ +use std::{ + collections::{HashMap, HashSet}, + fmt::Write as _, + time::Duration, +}; + +use core::future::Future; + +use blake2::{Digest, Blake2b256}; +use k256::{ecdsa::SigningKey as Secp256k1SigningKey, elliptic_curve::sec1::ToEncodedPoint}; +use rand_core::{OsRng, RngCore}; +use schnorrkel::Keypair as Sr25519Keypair; + +use serai_db::{DbTxn, Db as _}; +use serai_task::ContinuallyRan; + +use serai_client_serai::abi::{ + primitives::{ + BlockHash, + address::{SeraiAddress, ExternalAddress}, + balance::{Amount, ExternalBalance}, + coin::ExternalCoin, + crypto::{Public, ExternalKey, KeyPair}, + instructions::{OutInstruction, OutInstructionWithBalance}, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{Session, ValidatorSet, ExternalValidatorSet, KeyShares}, + }, + coins, system, validator_sets, Event, +}; + +use crate::{ + intend::{ + BlockEventData, BlockEvents, CosignIntendTask, GlobalSessionsChannel, IntendedCosigns, + ScanCosignFrom, + }, + SeraiRpc, + tests::*, + CosignIntent, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, HasEvents, + LatestGlobalSessionIntended, SubstrateBlockHash, +}; +use serai_substrate_tests::{composition, rpc}; + +fn set_keys_event_with_pair(set: ExternalValidatorSet, key_pair: &KeyPair) -> Event { + Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair: key_pair.clone() }) +} + +fn set_keys_event(set: ExternalValidatorSet, key_seed: u8) -> Event { + let key_pair = + KeyPair(Public([key_seed; 32]), ExternalKey(vec![key_seed; 32].try_into().unwrap())); + set_keys_event_with_pair(set, &key_pair) +} + +fn set_decided_event(set: ValidatorSet, validator: SeraiAddress) -> Event { + Event::ValidatorSets(validator_sets::Event::SetDecided { + set, + validators: vec![(validator, KeyShares(1))], + }) +} + +fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { + Event::ValidatorSets(validator_sets::Event::Allocation { + validator, + network, + amount: Amount(amount), + }) +} + +fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { + Event::ValidatorSets(validator_sets::Event::Deallocation { + validator, + network, + amount: Amount(amount), + timeline: validator_sets::DeallocationTimeline::Immediate, + }) +} + +fn burn_with_instruction_event(from: SeraiAddress) -> Event { + let address = ExternalAddress::try_from(vec![1u8, 2u8, 3u8]).unwrap(); + Event::Coins(coins::Event::BurnWithInstruction { + from, + instruction: OutInstructionWithBalance { + instruction: OutInstruction::Transfer(address), + balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1) }, + }, + }) +} + +fn assert_global_session(actual: &GlobalSession, expected: &GlobalSession) { + assert_eq!(actual.start_block_number, expected.start_block_number); + assert_eq!(actual.sets, expected.sets); + assert_eq!(actual.keys, expected.keys); + assert_eq!(actual.stakes, expected.stakes); + assert_eq!(actual.total_stake, expected.total_stake); +} + +#[tokio::test] +async fn intend_returns_false_with_no_blocks() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let mut task = env.into_task(); + assert_eq!(task.run_iteration().await.unwrap(), false); +} + +#[tokio::test] +async fn intend_returns_true_with_linear_blocks() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let block1_hash = env.serai.make_block(1); + env.serai.new_events(block1_hash); + + let mut task = env.into_task(); + // Returns true with one block + assert_eq!(task.run_iteration().await.unwrap(), true); + + let block2_hash = env.serai.make_block(2); + env.serai.new_events(block2_hash); + + let block3_hash = env.serai.make_block(3); + env.serai.new_events(block3_hash); + + let mut task = env.into_task(); + // Returns true with sequence of blocks + assert_eq!(task.run_iteration().await.unwrap(), true); +} + +#[tokio::test] +async fn intend_errors_if_chain_is_not_linear() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let block1_hash = env.serai.make_block(1); + env.serai.new_events(block1_hash); + + // Block #2 does not build upon block #1 + env.serai.builds_upon = IncrementalUnbalancedMerkleTree::new(); + + let block2_hash = env.serai.make_block(2); + env.serai.new_events(block2_hash); + + let mut task = env.into_task(); + let err = task.run_iteration().await.unwrap_err(); + assert!(err.contains("doesn't build upon"), "{err}"); +} + +#[tokio::test] +async fn intend_errors_if_block_not_found() { + init_logger(); + + let mut env = TestEnvironment::new(); + + // Make block 1 exist in terms of finalization, but return None when fetched + let _block1_hash = env.serai.make_block(1); + env.serai.set_block_not_found(1); + + let mut task = env.into_task(); + let err = task.run_iteration().await.unwrap_err(); + assert!( + err.contains("couldn't get block which should've been finalized"), + "unexpected error: {err}" + ); +} + +#[tokio::test] +async fn intend_handles_blocks_with_no_events() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let block1_hash = env.serai.make_block(1); + env.serai.new_events(block1_hash); + + let mut task = env.into_task(); + + task.run_iteration().await; + + let mut txn = env.db.txn(); + + let block = BlockEvents::try_recv(&mut txn).unwrap(); + assert_eq!(block.block_number, 1); + assert!(matches!(block.has_events, HasEvents::No)); + + txn.commit(); +} + +#[tokio::test] +async fn intend_errors_if_notable_block_has_no_stake() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let validator = SeraiAddress([7u8; 32]); + + let block1_hash = env.serai.make_block(1); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + env + .serai + .set_events(block1_hash, vec![set_decided_event(vset0, validator), set_keys_event(set0, 1)]); + + let mut task = env.into_task(); + let err = task.run_iteration().await.unwrap_err(); + assert!(err.contains("had 0 stake"), "{err}"); +} + +#[tokio::test] +async fn intend_task_indexes_blocks_and_emits_events_and_intents() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let validator = SeraiAddress([7u8; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + + let block1_hash = env.serai.make_block(1); + env.serai.set_events( + block1_hash, + vec![ + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), + set_decided_event(vset0, validator), + set_keys_event(set0, 1), + ], + ); + + let block2_hash = env.serai.make_block(2); + env + .serai + .set_events(block2_hash, vec![set_decided_event(vset1, validator), set_keys_event(set1, 2)]); + + let mut task = env.into_task(); + assert_eq!(task.run_iteration().await.unwrap(), true); + + assert_eq!(ScanCosignFrom::get(&env.db), Some(3u64)); + + let mut txn = env.db.txn(); + + // BlockEvents: block 1 is notable but has no prior global session, so it's treated as `No`. + + let first = BlockEvents::try_recv(&mut txn).expect("expected block 1 event"); + assert_eq!(first.block_number, 1); + assert!(matches!(first.has_events, HasEvents::No)); + + let second = BlockEvents::try_recv(&mut txn).expect("expected block 2 event"); + assert_eq!(second.block_number, 2); + assert!(matches!(second.has_events, HasEvents::Notable)); + + assert!(BlockEvents::try_recv(&mut txn).is_none()); + + // Global sessions were created at both notable blocks. + let (session1_id, session1) = { + let first = GlobalSessionsChannel::try_recv(&mut txn).expect("expected first global session"); + first + }; + assert_eq!(session1.start_block_number, 2); + assert_eq!(session1.sets, vec![set0]); + + let (session2_id, session2) = { + let second = GlobalSessionsChannel::try_recv(&mut txn).expect("expected second global session"); + second + }; + assert_ne!(session1_id, session2_id); + assert_eq!(session2.start_block_number, 3); + assert_eq!(session2.sets, vec![set1]); + + // Block 2 should be intended for cosigning by the prior global session. + + let intent = IntendedCosigns::try_recv(&mut txn, set0).expect("expected cosign intent for set0"); + + assert_eq!(intent.global_session, session1_id); + assert_eq!(intent.block_number, 2); + assert_eq!(intent.block_hash, block2_hash); + assert!(intent.notable); + + // No additional intents should exist. + + assert!(IntendedCosigns::try_recv(&mut txn, set0).is_none()); + + txn.commit(); +} + +#[tokio::test] +async fn intend_emits_non_notable_for_burn_with_instruction() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let validator = SeraiAddress([7u8; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + let block1_hash = env.serai.make_block(1); + env.serai.set_events( + block1_hash, + vec![ + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), + set_decided_event(vset0, validator), + set_keys_event(set0, 1), + ], + ); + + let block2_hash = env.serai.make_block(2); + env.serai.set_events(block2_hash, vec![burn_with_instruction_event(validator)]); + + let mut task = env.into_task(); + assert_eq!(task.run_iteration().await.unwrap(), true); + + let mut txn = env.db.txn(); + let _b1 = BlockEvents::try_recv(&mut txn).unwrap(); + let b2 = BlockEvents::try_recv(&mut txn).unwrap(); + assert_eq!(b2.block_number, 2); + assert!(matches!(b2.has_events, HasEvents::NonNotable)); + txn.commit(); +} + +#[tokio::test] +async fn intend_updates_stakes_and_ignores_non_external_network_events() { + init_logger(); + + let mut env = TestEnvironment::new(); + + let validator = SeraiAddress([7u8; 32]); + + let block1_hash = env.serai.make_block(1); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(9) }; + + env.serai.set_events( + block1_hash, + vec![ + // Non-ValidatorSets event to exercise the `continue` branch in stake processing. + Event::System(system::Event::TransactionSuccess), + // Stakes for the external network. + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 3), + // These are ignored as they're not external networks. + allocation_event(validator, NetworkId::Serai, 999), + deallocation_event(validator, NetworkId::Serai, 999), + // Decide the external set. + set_decided_event(vset0, validator), + // This SetDecided can't be converted to an external set and is ignored. + Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset_serai }), + Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset_serai, + validators: vec![], + }), + // Set keys for the external set. + set_keys_event(set0, 1), + ], + ); + + let mut task = env.into_task(); + assert_eq!(task.run_iteration().await.unwrap(), true); + + let (_id, info) = { + let mut txn = env.db.txn(); + let res = GlobalSessionsChannel::try_recv(&mut txn).expect("expected a global session"); + txn.commit(); + res + }; + assert_eq!(info.start_block_number, 2); + assert_eq!(info.stakes.get(&ExternalNetworkId::Bitcoin), Some(&7)); + assert_eq!(info.total_stake, 7); +} + +fn random_serai_address(rng: &mut OsRng) -> SeraiAddress { + SeraiAddress(Sr25519Keypair::generate_with(&mut *rng).public.to_bytes()) +} + +fn random_crypto_key_pair(rng: &mut OsRng) -> KeyPair { + let substrate_public = Sr25519Keypair::generate_with(&mut *rng).public.to_bytes(); + let external_point = + Secp256k1SigningKey::random(&mut *rng).verifying_key().to_encoded_point(true); + let external_key = + ExternalKey(external_point.as_bytes().to_vec().try_into().expect("compressed key fits")); + KeyPair(Public(substrate_public), external_key) +} + +#[tokio::test] +async fn intend_fuzz_test() { + init_logger(); + + let mut rng = &mut OsRng; + + for iteration in 0..3 { + let network = ExternalNetworkId::Bitcoin; + let validators: Vec = (0..3).map(|_| random_serai_address(rng)).collect(); + + let mut serai = Serai::default(); + + let mut stake_tracker: HashMap = HashMap::new(); + let mut next_session: u32 = 0; + let mut stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64> = HashMap::new(); + let mut decided_validators: HashMap> = HashMap::new(); + let mut latest_set: HashMap = HashMap::new(); + + let mut expected_block_events = vec![]; + let mut expected_sessions: Vec<([u8; 32], GlobalSession)> = vec![]; + let mut expected_session_last_block: HashMap<[u8; 32], u64> = HashMap::new(); + let mut expected_latest_global_session: Option<[u8; 32]> = None; + let mut expected_intents: HashMap> = HashMap::new(); + + let block_count = 8 + (rng.next_u32() % 5) as u64; + let forced_new_session_block = block_count / 2; + let forced_burn_after_new_session_block = forced_new_session_block + 1; + + for block_number in 1..=block_count { + let mut planned_stakes = stake_tracker.clone(); + let mut tx_events = vec![]; + + let adjustments = usize::try_from(rng.next_u32() % 3).unwrap_or(0); + for _ in 0..adjustments { + let should_allocate = + (rng.next_u32() % 2 == 0) || planned_stakes.values().all(|stake| *stake == 0); + + if should_allocate { + let validator = validators[(rng.next_u32() as usize) % validators.len()]; + let amount = (rng.next_u64() % 10) + 1; + *planned_stakes.entry(validator).or_default() += amount; + + tx_events.push(allocation_event(validator, NetworkId::External(network), amount)); + } else { + let available: Vec<_> = validators + .iter() + .copied() + .filter(|validator| planned_stakes.get(validator).copied().unwrap_or(0) > 0) + .collect(); + if let Some(validator) = available.get((rng.next_u32() as usize) % available.len()) { + let validator = *validator; + let current = planned_stakes[&validator]; + let amount = (rng.next_u64() % current).saturating_add(1); + planned_stakes.insert(validator, current - amount); + + tx_events.push(deallocation_event(validator, NetworkId::External(network), amount)); + } + } + } + + let include_burn = block_number != 1 + && ((block_number == 2) + || (block_number == forced_burn_after_new_session_block) + || (rng.next_u32() % 3 == 0)); + + let is_initial_session = block_number == 1; // Session 0 + let is_random_session = (block_number > 2 && (rng.next_u32() % 10 == 0)); // 10% chance + let will_create_new_session = + (block_number == forced_new_session_block) || is_initial_session || is_random_session; + + if will_create_new_session { + let validator = { + let mut available = validators + .iter() + .copied() + .filter(|validator| planned_stakes.get(validator).copied().unwrap_or(0) > 0) + .collect::>(); + + if available.is_empty() { + let validator = validators[(rng.next_u32() as usize) % validators.len()]; + let top_up = (rng.next_u64() % 10) + 1; + + *planned_stakes.entry(validator).or_default() += top_up; + + tx_events.push(allocation_event(validator, NetworkId::External(network), top_up)); + validator + } else { + available[(rng.next_u32() as usize) % available.len()] + } + }; + + let session = Session(next_session.try_into().unwrap()); + let set = ExternalValidatorSet { network, session }; + let vset = ValidatorSet { network: NetworkId::External(network), session }; + + tx_events.push(set_decided_event(vset, validator)); + let key_pair = random_crypto_key_pair(rng); + + tx_events.push(set_keys_event_with_pair(set, &key_pair)); + next_session = next_session.saturating_add(1); + } + + if include_burn { + let burn_from = validators[(rng.next_u32() as usize) % validators.len()]; + + tx_events.push(burn_with_instruction_event(burn_from)); + } + + let block_hash = serai.make_block(block_number); + + serai.set_events(block_hash, tx_events.clone()); + + let mut has_set_keys = false; + let mut has_burn = false; + + for event in tx_events { + match event { + Event::ValidatorSets(validator_sets::Event::Allocation { + validator, + network, + amount, + }) => { + let Ok(network) = ExternalNetworkId::try_from(network) else { continue }; + let key = (network, validator); + *stakes.entry(key).or_default() += amount.0; + } + Event::ValidatorSets(validator_sets::Event::Deallocation { + validator, + network, + amount, + timeline: _, + }) => { + let Ok(network) = ExternalNetworkId::try_from(network) else { continue }; + let key = (network, validator); + let stake = stakes.get_mut(&key).expect("deallocating missing stake"); + assert!(*stake >= amount.0, "deallocation underflow in expected model"); + *stake -= amount.0; + } + Event::ValidatorSets(validator_sets::Event::SetDecided { + set, + validators: event_validators, + }) => { + let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; + decided_validators + .insert(set, event_validators.iter().map(|(validator, _)| *validator).collect()); + } + Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair }) => { + has_set_keys = true; + let validators = + decided_validators.remove(&set).expect("set which wasn't decided set keys"); + let mut total_stake = 0; + for validator in validators { + total_stake += stakes.get(&(set.network, validator)).copied().unwrap_or(0); + } + latest_set.insert(set.network, (set.session, key_pair.0, total_stake)); + } + Event::Coins(coins::Event::BurnWithInstruction { .. }) => has_burn = true, + _ => {} + } + } + + let mut has_events = if has_set_keys { + HasEvents::Notable + } else if has_burn { + HasEvents::NonNotable + } else { + HasEvents::No + }; + + let global_session_for_this_block = expected_latest_global_session; + + if has_events == HasEvents::Notable { + let mut sets = vec![]; + let mut keys = HashMap::new(); + let mut session_stakes = HashMap::new(); + let mut total_stake = 0; + + for network in ExternalNetworkId::all() { + if let Some((session, key, stake)) = latest_set.get(&network).copied() { + let set = ExternalValidatorSet { network, session }; + sets.push(set); + keys.insert(network, key); + session_stakes.insert(network, stake); + total_stake += stake; + } + } + + assert!(total_stake > 0, "cosigning sets for block #{block_number} had 0 stake in total"); + + let global_session = GlobalSession { + start_block_number: block_number + 1, + sets: sets.clone(), + keys, + stakes: session_stakes, + total_stake, + }; + let session_id = GlobalSession::id(sets); + if let Some(existing) = global_session_for_this_block { + expected_session_last_block.insert(existing, block_number); + } + expected_latest_global_session = Some(session_id); + expected_sessions.push((session_id, global_session)); + } + + if global_session_for_this_block.is_none() { + has_events = HasEvents::No; + } + + if matches!(has_events, HasEvents::Notable | HasEvents::NonNotable) { + if let Some(global_session) = global_session_for_this_block { + let session = expected_sessions + .iter() + .find(|(session_id, _)| *session_id == global_session) + .map(|(_, session)| session) + .expect("global session missing from expected state"); + for set in &session.sets { + let intent = CosignIntent { + global_session, + block_number, + block_hash, + notable: has_events == HasEvents::Notable, + }; + expected_intents.entry(*set).or_default().push(intent); + } + } + } + + expected_block_events.push(BlockEventData { block_number, has_events }); + + stake_tracker = planned_stakes; + } + + let blocks_by_number = serai.blocks_by_number.clone(); + + let mut env = TestEnvironment::from_serai(serai); + + let mut task = env.into_task(); + + task.run_iteration().await.unwrap(); + + let latest_block = block_count; + + let scan_cosign_from = ScanCosignFrom::get(&env.db); + assert_eq!(scan_cosign_from, Some(latest_block + 1)); + + let mut txn = env.db.txn(); + + let mut block_events = vec![]; + while let Some(event) = BlockEvents::try_recv(&mut txn) { + block_events.push(event); + } + + assert_eq!(block_events.len(), expected_block_events.len()); + + for (idx, (actual, expected)) in block_events.iter().zip(&expected_block_events).enumerate() { + assert_eq!(actual.block_number, expected.block_number); + assert_eq!(actual.has_events, expected.has_events); + } + + for (block_number, block) in &blocks_by_number { + let stored_hash = SubstrateBlockHash::get(&txn, *block_number); + let expected_hash = Some(block.header.hash()); + assert_eq!(stored_hash, expected_hash); + } + + let mut sessions_from_channel = vec![]; + while let Some(entry) = GlobalSessionsChannel::try_recv(&mut txn) { + sessions_from_channel.push(entry); + } + + assert_eq!(sessions_from_channel.len(), expected_sessions.len()); + for (idx, ((actual_id, actual_session), (expected_id, expected_session))) in + sessions_from_channel.iter().zip(&expected_sessions).enumerate() + { + assert_eq!(actual_id, expected_id); + assert_global_session(actual_session, expected_session); + } + + for (session_id, expected_session) in &expected_sessions { + let stored = GlobalSessions::get(&txn, *session_id).expect("missing stored global session"); + assert_global_session(&stored, expected_session); + let expected_last_block = expected_session_last_block.get(session_id).copied(); + let stored_last_block = GlobalSessionsLastBlock::get(&txn, *session_id); + assert_eq!(stored_last_block, expected_last_block); + } + let latest_intended = LatestGlobalSessionIntended::get(&txn); + assert_eq!(latest_intended, expected_latest_global_session); + + let all_sets: HashSet<_> = + expected_sessions.iter().flat_map(|(_, session)| session.sets.iter().copied()).collect(); + + for set in all_sets { + let mut actual = vec![]; + while let Some(intent) = IntendedCosigns::try_recv(&mut txn, set) { + actual.push(intent); + } + let expected = expected_intents.get(&set).cloned().unwrap_or_default(); + assert_eq!(actual, expected, "intents mismatch for set {:?}", set); + } + + txn.commit(); + } +} diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index cabb3e42d..eceddb9b2 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -1,2 +1,239 @@ #[cfg(test)] mod delay; + +#[cfg(test)] +mod intend; + +use blake2::{Digest, Blake2b256}; +use core::future::Future; +use std::{ + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, OnceLock, + }, +}; + +use rand_core::{OsRng, RngCore}; + +use schnorrkel::{ExpansionMode, Keypair, MiniSecretKey}; + +use serai_client_serai::{ + abi::{ + primitives::{ + crypto::Public, + merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, + network_id::ExternalNetworkId, + BlockHash, + }, + Block, Event, Header, HeaderV1, BLOCK_HEADER_BRANCH_TAG, BLOCK_HEADER_LEAF_TAG, + }, + Events, +}; + +use crate::{ + SeraiRpc, + intend::{CosignIntendTask}, + COSIGN_CONTEXT, Cosign, SignedCosign, +}; +use serai_db::MemDb; + +struct TestLogger; + +static LOG_ENABLED: AtomicBool = AtomicBool::new(true); + +impl log::Log for TestLogger { + fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { + LOG_ENABLED.load(Ordering::Relaxed) + } + + fn log(&self, _record: &log::Record<'_>) {} + + fn flush(&self) {} +} + +fn init_logger() { + static LOGGER: TestLogger = TestLogger; + static INIT: OnceLock<()> = OnceLock::new(); + INIT.get_or_init(|| { + let _ = log::set_logger(&LOGGER); + log::set_max_level(log::LevelFilter::Trace); + }); +} + +pub(crate) fn cosign_fixture(seed: [u8; 32], cosigner: ExternalNetworkId) -> Cosign { + let block_number = u64::from_le_bytes(seed[..8].try_into().unwrap()); + let block_hash = seed.map(|b| b ^ 0xAA); + + Cosign { global_session: seed, block_number, block_hash: BlockHash(block_hash), cosigner } +} + +pub(crate) fn keypair_from_seed(seed: [u8; 32]) -> Keypair { + MiniSecretKey::from_bytes(&seed) + .expect("test seeds should always create a keypair") + .expand_to_keypair(ExpansionMode::Uniform) +} + +pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { + let mut seed = [0u8; 32]; + + loop { + OsRng.fill_bytes(&mut seed); + if let Ok(mini) = schnorrkel::MiniSecretKey::from_bytes(&seed) { + let keypair = mini.expand_to_keypair(schnorrkel::ExpansionMode::Ed25519); + break keypair; + } + } +} + +pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { + let sig = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()); + SignedCosign { cosign, signature: sig.to_bytes() } +} + +pub(crate) fn signed_cosign_fixture( + seed: [u8; 32], + cosigner: ExternalNetworkId, +) -> (SignedCosign, Public) { + let cosign = cosign_fixture(seed, cosigner); + let keypair = keypair_from_seed(seed.map(|b| b ^ 0x55)); + let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()); + + (SignedCosign { cosign, signature: signature.to_bytes() }, Public(keypair.public.to_bytes())) +} + +#[derive(Clone)] +pub(crate) struct Serai { + pub(crate) block_by_number_error: Option, + pub(crate) events_error: Option, + pub(crate) blocks_by_number: HashMap, + pub(crate) events_by_hash: HashMap, + pub(crate) builds_upon: IncrementalUnbalancedMerkleTree, + pub(crate) missing_blocks: HashSet, +} + +impl Default for Serai { + fn default() -> Self { + Self { + block_by_number_error: None, + events_error: None, + blocks_by_number: HashMap::new(), + events_by_hash: HashMap::new(), + builds_upon: IncrementalUnbalancedMerkleTree::new(), + missing_blocks: HashSet::new(), + } + } +} + +impl Serai { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn set_block_not_found(&mut self, block_number: u64) { + self.missing_blocks.insert(block_number); + } + + pub(crate) fn make_block(&mut self, number: u64) -> BlockHash { + let block = Block { + header: Header::V1(HeaderV1 { + number, + builds_upon: self.builds_upon.clone().calculate(BLOCK_HEADER_BRANCH_TAG), + unix_time_in_millis: 0, + transactions_commitment: UnbalancedMerkleTree::EMPTY, + events_commitment: UnbalancedMerkleTree::EMPTY, + consensus_commitment: [0; 32], + }), + transactions: vec![], + }; + + let block_hash = block.header.hash(); + self.builds_upon.append( + BLOCK_HEADER_BRANCH_TAG, + Blake2b256::new_with_prefix([BLOCK_HEADER_LEAF_TAG]) + .chain_update(block_hash.0) + .finalize() + .into(), + ); + + if number > 0u64 { + self.blocks_by_number.insert(number, block); + } + + block_hash + } + + pub(crate) fn new_events(&mut self, block_hash: BlockHash) { + self.events_by_hash = HashMap::from([(block_hash, Events::new())]); + } + + pub(crate) fn set_events(&mut self, block_hash: BlockHash, events: Vec) { + self.events_by_hash.insert(block_hash, Events::with(events)); + } + + pub(crate) fn builds_upon(&self) -> &IncrementalUnbalancedMerkleTree { + &self.builds_upon + } +} + +impl SeraiRpc for Serai { + fn latest_finalized_block_number(&self) -> impl Send + Future> { + let latest = self.blocks_by_number.keys().copied().max().unwrap_or(0); + async move { Ok(latest) } + } + + fn block_by_number( + &self, + block: u64, + ) -> impl Send + Future, String>> { + let err = self.block_by_number_error.clone(); + let block_entry = self.blocks_by_number.get(&block).cloned(); + let is_missing = self.missing_blocks.contains(&block); + + async move { + if let Some(e) = err { + return Err(e); + } + if is_missing { + return Ok(None); + } + Ok(block_entry) + } + } + + fn events(&self, block: BlockHash) -> impl Send + Future> { + let err = self.events_error.clone(); + let events = self.events_by_hash.get(&block).cloned().unwrap_or_default(); + async move { + if let Some(e) = err { + return Err(e); + } + Ok(events) + } + } +} + +pub(crate) struct TestEnvironment { + pub(crate) serai: Serai, + pub(crate) db: MemDb, +} + +impl Default for TestEnvironment { + fn default() -> Self { + Self { serai: Serai::new(), db: MemDb::new() } + } +} + +impl TestEnvironment { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn from_serai(serai: Serai) -> Self { + Self { serai, db: MemDb::new() } + } + + pub(crate) fn into_task(&self) -> CosignIntendTask { + CosignIntendTask { db: self.db.clone(), serai: self.serai.clone() } + } +} diff --git a/substrate/client/serai/src/lib.rs b/substrate/client/serai/src/lib.rs index 0b205a7ef..23bbb5936 100644 --- a/substrate/client/serai/src/lib.rs +++ b/substrate/client/serai/src/lib.rs @@ -227,7 +227,23 @@ impl Serai { } } +impl Default for Events { + fn default() -> Self { + Events { events: Arc::new(vec![vec![]]) } + } +} + impl Events { + /// Create an instance of Events + pub fn new() -> Self { + Events::default() + } + + /// Create an instance of Events + pub fn with(events: Vec) -> Self { + Events { events: Arc::new(vec![events]) } + } + /// The events within this container. /// /// This will yield the events for each transaction within the block, including the implicit From d45d5e7c72f630e7e78a19b834dd3b38da6907ac Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 23 Dec 2025 10:36:15 -0300 Subject: [PATCH 03/71] feat(cosign): improve delay iteration txn use, sanity check timestamp, avoid overflow panic --- coordinator/cosign/src/delay.rs | 43 +++-- coordinator/cosign/src/tests/delay.rs | 238 ++++++++++++++++++++------ 2 files changed, 212 insertions(+), 69 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index f0789a80e..41fcf6fd8 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -2,7 +2,7 @@ use core::future::Future; use std::time::{Duration, SystemTime}; use serai_db::*; -use serai_task::{DoesNotError, ContinuallyRan}; +use serai_task::ContinuallyRan; use crate::evaluator::CosignedBlocks; @@ -12,6 +12,10 @@ const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); pub(crate) const ACKNOWLEDGEMENT_DELAY: Duration = Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs()); +pub(crate) fn now_timestamp() -> Duration { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO) +} + create_db!( SubstrateCosignDelay { // The latest cosigned block number. @@ -25,34 +29,49 @@ pub(crate) struct CosignDelayTask { } impl ContinuallyRan for CosignDelayTask { - type Error = DoesNotError; + type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; loop { let mut txn = self.db.txn(); + let cosigned_block = CosignedBlocks::try_recv(&mut txn); + txn.commit(); - // Receive the next block to mark as cosigned - let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { + let Some((block_number, time_evaluated)) = cosigned_block else { break; }; - // If we've already acknowledged a later block, don't regress (and don't wait). - let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); + if block_number == 0u64 { + return Ok(false); + } + + // If we've already acknowledged a later block, consume and skip (don't wait). + let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); if block_number <= already_cosigned { - txn.commit(); made_progress = true; continue; } - // Calculate when we should mark it as valid - let time_valid = Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; + // Calculate when we should mark it as valid, checking for overflow to avoid panic + let time_evaluated_duration = Duration::from_secs(time_evaluated); + let Some(time_valid) = time_evaluated_duration.checked_add(ACKNOWLEDGEMENT_DELAY) else { + return Err(format!( + "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY" + )); + }; + let now = now_timestamp(); - // Sleep until then - tokio::time::sleep(time_valid).await; + // If the time valid is greater than the current time, + // sleep until the time valid is reached + if time_valid > now { + // Sleep until then (no transaction held during sleep) + tokio::time::sleep(time_valid.saturating_sub(now)).await; + } - // Set the cosigned block + // Atomically consume the message AND update the cosigned block number + let mut txn = self.db.txn(); LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 2f2928b5b..25f92ce56 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,111 +1,235 @@ -use crate::LatestCosignedBlockNumber; -use crate::delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask}; -use std::{ - sync::OnceLock, - time::{Duration, SystemTime}, +use std::time::{Duration, SystemTime}; + +use crate::{ + LatestCosignedBlockNumber, delay::ACKNOWLEDGEMENT_DELAY, evaluator::CosignedBlocks, tests::Test, }; -use serai_db::*; +use serai_db::{Db as _, DbTxn as _}; use serai_task::ContinuallyRan; -use crate::evaluator::CosignedBlocks; +fn now_timestamp() -> u64 { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs() +} + +impl Test { + // Assert CosignedBlocks queue items have been consumed after task run + fn assert_queue_empty(&self) { + assert!(CosignedBlocks::peek(&self.db).is_none(), "expected queue to be empty"); + } + + // Assert LatestCosignedBlockNumber db points to latest block number after task run + fn assert_latest_cosigned_block_number(&self, block_number: Option) { + assert_eq!(LatestCosignedBlockNumber::get(&self.db), block_number); + } -use serai_db::{Db as _, MemDb}; + // Assert everything that changed or should have changed after a task iteration run + fn assert_task_iteration(&self, latest_cosigned_block_number: Option) { + self.assert_latest_cosigned_block_number(latest_cosigned_block_number); + self.assert_queue_empty(); + } +} #[tokio::test] async fn delay_task_returns_false_with_no_messages() { - let db = MemDb::new(); - let mut task = CosignDelayTask { db }; - assert_eq!(task.run_iteration().await.unwrap(), false); + let mut task = Test::new().into_delay_task(); + Test::assert_task_made_progress(&mut task, false).await; } #[tokio::test] async fn delay_task_updates_latest_cosigned_block_number() { - let mut db = MemDb::new(); + let mut test = Test::new(); { - let mut txn = db.txn(); - CosignedBlocks::send(&mut txn, &(7u64, 0u64)); + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(2u64, now_timestamp())); txn.commit(); } - let mut task = CosignDelayTask { db: db.clone() }; - assert_eq!(task.run_iteration().await.unwrap(), true); - assert_eq!(LatestCosignedBlockNumber::get(&db), Some(7u64)); + let mut task = test.into_delay_task(); + Test::assert_task_made_progress(&mut task, true).await; + + test.assert_task_iteration(Some(2u64)); } #[tokio::test] async fn delay_task_drains_multiple_messages_in_one_iteration() { - let mut db = MemDb::new(); + let mut test = Test::new(); + let now = now_timestamp(); { - let mut txn = db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, 0u64)); - CosignedBlocks::send(&mut txn, &(2u64, 0u64)); - CosignedBlocks::send(&mut txn, &(3u64, 0u64)); + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, now)); + CosignedBlocks::send(&mut txn, &(2u64, now)); + CosignedBlocks::send(&mut txn, &(3u64, now)); txn.commit(); } - let mut task = CosignDelayTask { db: db.clone() }; - assert_eq!(task.run_iteration().await.unwrap(), true); - assert_eq!(LatestCosignedBlockNumber::get(&db), Some(3u64)); - assert!(CosignedBlocks::peek(&db).is_none()); + let mut task = test.into_delay_task(); + Test::assert_task_made_progress(&mut task, true).await; + + test.assert_task_iteration(Some(3u64)); } -#[tokio::test(start_paused = true)] +#[tokio::test] async fn delay_task_does_not_regress_and_skips_wait_for_stale_messages() { - let mut db = MemDb::new(); + let mut test = Test::new(); + let now = now_timestamp(); + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, now)); + CosignedBlocks::send(&mut txn, &(2u64, now)); + CosignedBlocks::send(&mut txn, &(4u64, now)); + txn.commit(); + } { - let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &10u64); + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(3u64, now)); txn.commit(); } - let now_secs = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs(); + let mut task = test.into_delay_task(); + Test::assert_task_made_progress(&mut task, true).await; + + // Queue order: 1, 2, 4, 3 + // Block 1 processed (1 > 0), Block 2 processed (2 > 1), + // Block 4 processed (4 > 2), Block 3 skipped (3 <= 4) + test.assert_task_iteration(Some(4u64)); +} + +#[tokio::test(flavor = "multi_thread")] +async fn delay_task_does_not_ack_before_acknowledgement_delay() { + let mut test = Test::new(); + let now = now_timestamp(); { - let mut txn = db.txn(); - CosignedBlocks::send(&mut txn, &(9u64, now_secs)); + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, now)); txn.commit(); } - let mut task = CosignDelayTask { db: db.clone() }; + let mut task = test.into_delay_task(); let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); - tokio::task::yield_now().await; - assert_eq!(handle.await.unwrap(), true); - assert_eq!(LatestCosignedBlockNumber::get(&db), Some(10u64)); - assert!(CosignedBlocks::peek(&db).is_none()); + // Give the task a moment to start and reach the sleep + tokio::time::sleep(Duration::from_millis(50)).await; + test.assert_latest_cosigned_block_number(None); + + // Sleep for most of (but not all) the acknowledgement delay - should still not be set + tokio::time::sleep(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(1)).await; + test.assert_latest_cosigned_block_number(None); + + // Wait for the task to complete + let result = handle.await.unwrap(); + + assert_eq!(result, true); + test.assert_task_iteration(Some(1u64)); } -#[tokio::test(start_paused = true)] -async fn delay_task_does_not_ack_before_acknowledgement_delay() { - let mut db = MemDb::new(); +#[tokio::test] +async fn delay_task_with_zero_timestamp_processes_immediately() { + let mut test = Test::new(); + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, 0u64)); + txn.commit(); + } + + let mut task = test.into_delay_task(); + // This should complete immediately without sleeping + // Since now > 0 + ACKNOWLEDGEMENT_DELAY, + // time_valid < now (already valid), so no sleep occurs + Test::assert_task_made_progress(&mut task, true).await; - let now_secs = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs(); + test.assert_task_iteration(Some(1u64)); +} + +#[tokio::test] +async fn delay_task_with_max_timestamp_returns_error() { + let mut test = Test::new(); { - let mut txn = db.txn(); - CosignedBlocks::send(&mut txn, &(7u64, now_secs)); + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, u64::MAX)); txn.commit(); } - let mut task = CosignDelayTask { db: db.clone() }; - let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); + let mut task = test.into_delay_task(); + let result = task.run_iteration().await; + + // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow + // The task should return an error instead of panicking + assert!(result.is_err()); + assert!(result.unwrap_err().contains("overflow")); + + // The block should not have been acknowledged + test.assert_task_iteration(None); +} + +#[tokio::test] +async fn delay_task_with_far_future_timestamp_hangs() { + // A timestamp far in the future (but not MAX to avoid overflow) + // will cause the task to sleep for an extremely long time + let mut test = Test::new(); + let far_future = now_timestamp() + 1_000_000; + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, far_future)); + txn.commit(); + } + + let mut task = test.into_delay_task(); - tokio::task::yield_now().await; - assert_eq!(LatestCosignedBlockNumber::get(&db), None); + // Use a timeout to prevent the test from hanging forever + let result = tokio::time::timeout(Duration::from_millis(100), task.run_iteration()).await; - tokio::time::advance(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(2)).await; - tokio::task::yield_now().await; - assert_eq!(LatestCosignedBlockNumber::get(&db), None); + assert!(result.is_err(), "Expected timeout, but task completed"); + + // The block should not have been acknowledged since we timed out + test.assert_task_iteration(None); +} + +#[tokio::test] +async fn delay_task_increasing_blocks_with_increasing_timestamps() { + let mut test = Test::new(); + let base_time = now_timestamp(); + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, base_time)); + CosignedBlocks::send(&mut txn, &(2u64, base_time + 1)); + CosignedBlocks::send(&mut txn, &(3u64, base_time + 2)); + txn.commit(); + } + + let mut task = test.into_delay_task(); + Test::assert_task_made_progress(&mut task, true).await; + + test.assert_task_iteration(Some(3u64)); +} + +#[tokio::test] +async fn delay_task_increasing_blocks_with_decreasing_timestamps() { + // This simulates a scenario where later blocks were evaluated earlier + // (e.g., due to clock skew) + let mut test = Test::new(); + let base_time = now_timestamp(); + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1u64, base_time + 2)); + CosignedBlocks::send(&mut txn, &(2u64, base_time + 1)); + CosignedBlocks::send(&mut txn, &(3u64, base_time)); + txn.commit(); + } - tokio::time::advance(Duration::from_secs(4)).await; - tokio::task::yield_now().await; + let mut task = test.into_delay_task(); + Test::assert_task_made_progress(&mut task, true).await; - assert_eq!(handle.await.unwrap(), true); - assert_eq!(LatestCosignedBlockNumber::get(&db), Some(7u64)); + // All blocks should still be processed in order, ending with block 3 + // Even though block 3 has an earlier timestamp, it processes after block 1 and 2 + test.assert_task_iteration(Some(3u64)); } From 057ebcef699eb0490c9e032d0389299cf7e1a053 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 24 Dec 2025 15:16:09 -0300 Subject: [PATCH 04/71] feat(cosign): refactor intend tests, add better test cases --- Cargo.lock | 4 + coordinator/cosign/src/intend.rs | 62 +- coordinator/cosign/src/lib.rs | 21 +- coordinator/cosign/src/tests/delay.rs | 36 +- coordinator/cosign/src/tests/intend.rs | 1206 ++++++++++++++---------- coordinator/cosign/src/tests/mod.rs | 135 +-- 6 files changed, 817 insertions(+), 647 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab96d9a60..d5d19f810 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8265,10 +8265,14 @@ version = "0.1.0" dependencies = [ "blake2 0.11.0-rc.3", "borsh", + "k256", "log", + "rand_core 0.6.4", + "schnorrkel", "serai-client-serai", "serai-cosign-types", "serai-db", + "serai-substrate-tests", "serai-task", "tokio", ] diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index e81d6c442..ece20a479 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -1,5 +1,5 @@ use core::future::Future; -use std::{sync::Arc, collections::HashMap}; +use std::collections::HashMap; use blake2::{Digest as _, Blake2b256}; @@ -15,7 +15,7 @@ use serai_client_serai::{ }, validator_sets, Event, }, - Serai, Events, + Events, }; use serai_db::*; @@ -24,10 +24,10 @@ use serai_task::ContinuallyRan; use crate::*; #[derive(BorshSerialize, BorshDeserialize)] -struct Set { - session: Session, - key: Public, - stake: Amount, +pub(crate) struct Set { + pub(crate) session: Session, + pub(crate) key: Public, + pub(crate) stake: Amount, } create_db!( @@ -58,12 +58,15 @@ async fn block_has_events_justifying_a_cosign( serai: &impl SeraiRpc, block_number: u64, ) -> Result<(Block, Events, HasEvents), String> { - let block = serai - .block_by_number(block_number) - .await - .unwrap() - .ok_or_else(|| "couldn't get block which should've been finalized".to_owned())?; - let events = serai.events(block.header.hash()).await?; + let block = match serai.block_by_number(block_number).await { + Ok(Some(block)) => block, + Ok(None) => return Err("couldn't get block which should've been finalized".to_owned()), + Err(e) => return Err(format!("RPC error fetching block #{block_number}: {e}")), + }; + let events = match serai.events(block.header.hash()).await { + Ok(events) => events, + Err(e) => return Err(format!("RPC error fetching events for block #{block_number}: {e}")), + }; if events.validator_sets().set_keys_events().next().is_some() { return Ok((block, events, HasEvents::Notable)); @@ -103,7 +106,14 @@ impl ContinuallyRan for CosignIntendTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = self.serai.latest_finalized_block_number().await?; + let latest_block_number = match self.serai.latest_finalized_block_number().await { + Ok(n) => n, + Err(e) => return Err(format!("RPC error fetching latest finalized block number: {e}")), + }; + + if latest_block_number < start_block_number { + return Ok(false); + } for block_number in start_block_number..=latest_block_number { let mut txn = self.db.txn(); @@ -118,7 +128,9 @@ impl ContinuallyRan for CosignIntendTask { if block.header.builds_upon() != builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG) { - Err(format!( + // nothing to commit + drop(txn); + return Err(format!( "node's block #{block_number} doesn't build upon the block #{} prior indexed", block_number - 1 ))?; @@ -147,7 +159,12 @@ impl ContinuallyRan for CosignIntendTask { validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); + Stakes::set( + &mut txn, + network, + *validator, + &Amount(existing.0.saturating_sub(amount.0)), + ); } validator_sets::Event::SetDecided { set, validators } => { let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; @@ -190,15 +207,17 @@ impl ContinuallyRan for CosignIntendTask { let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len()); let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len()); let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len()); - let mut total_stake = 0; + let mut total_stake = 0u64; for (set, key, stake) in sets_and_keys_and_stakes { sets.push(set); keys.insert(set.network, key); stakes.insert(set.network, stake.0); - total_stake += stake.0; + total_stake = total_stake.saturating_add(stake.0); } if total_stake == 0 { - Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; + // commit only per block finished otherwise reset progress + drop(txn); + return Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; } let global_session_info = GlobalSession { @@ -234,6 +253,7 @@ impl ContinuallyRan for CosignIntendTask { // Tell each set of their expectation to cosign this block for set in global_session_info.sets { + #[cfg(not(coverage))] log::debug!("{set:?} will be cosigning block #{block_number}"); IntendedCosigns::send( &mut txn, @@ -250,14 +270,16 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - // Populate a singular feed with every block's status for the evluator to work off of + // Populate a singular feed with every block's status for the evaluator to work off of BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); // Mark this block as handled, meaning we should scan from the next block moving on ScanCosignFrom::set(&mut txn, &(block_number + 1)); + + // All-or-nothing, commit only per block finished otherwise reset progress txn.commit(); } - Ok(start_block_number <= latest_block_number) + Ok(true) } } } diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 1f9f1359d..0622333eb 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -4,7 +4,7 @@ #![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] use core::{fmt::Debug, future::Future}; -use std::{sync::Arc, collections::HashMap, time::Instant}; +use std::{collections::HashMap, time::Instant}; use blake2::{Digest as _, Blake2s256}; @@ -18,7 +18,7 @@ use serai_client_serai::{ }, Block, }, - Events, Serai, State, + Events, }; use serai_db::*; @@ -35,7 +35,7 @@ mod delay; pub use delay::BROADCAST_FREQUENCY; use delay::LatestCosignedBlockNumber; -#[cfg(any(test, feature = "tests"))] +#[cfg(test)] /// Test helpers and fixtures. pub mod tests; @@ -54,6 +54,7 @@ pub trait SeraiRpc: Clone + Send + Sync + 'static { fn events(&self, block: BlockHash) -> impl Send + Future>; } +#[cfg(not(coverage))] impl SeraiRpc for Arc { fn latest_finalized_block_number(&self) -> impl Send + Future> { let serai = self.clone(); @@ -264,7 +265,7 @@ impl Cosigning { /// The latest cosigned block number. pub fn latest_cosigned_block_number(getter: &impl Get) -> Result { if FaultedSession::get(getter).is_some() { - Err(Faulted)?; + return Err(Faulted)?; } Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) @@ -338,7 +339,7 @@ impl Cosigning { // Check our indexed blockchain includes a block with this block number let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else { - Err(IntakeCosignError::NotYetIndexedBlock)? + return Err(IntakeCosignError::NotYetIndexedBlock)?; }; let faulty = cosign.block_hash != our_block_hash; @@ -348,19 +349,19 @@ impl Cosigning { NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network) { if existing.cosign.block_number >= cosign.block_number { - Err(IntakeCosignError::StaleCosign)?; + return Err(IntakeCosignError::StaleCosign)?; } } } let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else { - Err(IntakeCosignError::UnrecognizedGlobalSession)? + return Err(IntakeCosignError::UnrecognizedGlobalSession)?; }; // Check the cosigned block number is in range to the global session if cosign.block_number < global_session.start_block_number { // Cosign is for a block predating the global session - Err(IntakeCosignError::BeforeGlobalSessionStart)?; + return Err(IntakeCosignError::BeforeGlobalSessionStart)?; } if !faulty { // This prevents a malicious validator set, on the same chain, from producing a cosign after @@ -368,7 +369,7 @@ impl Cosigning { if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) { if cosign.block_number > last_block { // Cosign is for a block after the last block this global session should have signed - Err(IntakeCosignError::AfterGlobalSessionEnd)?; + return Err(IntakeCosignError::AfterGlobalSessionEnd)?; } } } @@ -378,7 +379,7 @@ impl Cosigning { let key = *global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?; if !signed_cosign.verify_signature(key) { - Err(IntakeCosignError::InvalidSignature)?; + return Err(IntakeCosignError::InvalidSignature)?; } } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 25f92ce56..01bbd4ecc 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -32,7 +32,21 @@ impl Test { #[tokio::test] async fn delay_task_returns_false_with_no_messages() { let mut task = Test::new().into_delay_task(); - Test::assert_task_made_progress(&mut task, false).await; + Test::assert_task_run_and_check_progress(&mut task, false).await; +} + +#[tokio::test] +async fn delay_task_returns_false_with_genesis_block() { + let mut test = Test::new(); + + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(0u64, now_timestamp())); + txn.commit(); + } + + let mut task = test.into_delay_task(); + Test::assert_task_run_and_check_progress(&mut task, false).await; } #[tokio::test] @@ -41,14 +55,14 @@ async fn delay_task_updates_latest_cosigned_block_number() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(2u64, now_timestamp())); + CosignedBlocks::send(&mut txn, &(1u64, now_timestamp())); txn.commit(); } let mut task = test.into_delay_task(); - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration(Some(2u64)); + test.assert_task_iteration(Some(1u64)); } #[tokio::test] @@ -65,7 +79,7 @@ async fn delay_task_drains_multiple_messages_in_one_iteration() { } let mut task = test.into_delay_task(); - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; test.assert_task_iteration(Some(3u64)); } @@ -90,7 +104,7 @@ async fn delay_task_does_not_regress_and_skips_wait_for_stale_messages() { } let mut task = test.into_delay_task(); - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; // Queue order: 1, 2, 4, 3 // Block 1 processed (1 > 0), Block 2 processed (2 > 1), @@ -98,7 +112,7 @@ async fn delay_task_does_not_regress_and_skips_wait_for_stale_messages() { test.assert_task_iteration(Some(4u64)); } -#[tokio::test(flavor = "multi_thread")] +#[tokio::test] async fn delay_task_does_not_ack_before_acknowledgement_delay() { let mut test = Test::new(); let now = now_timestamp(); @@ -122,8 +136,8 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { // Wait for the task to complete let result = handle.await.unwrap(); - assert_eq!(result, true); + test.assert_task_iteration(Some(1u64)); } @@ -141,7 +155,7 @@ async fn delay_task_with_zero_timestamp_processes_immediately() { // This should complete immediately without sleeping // Since now > 0 + ACKNOWLEDGEMENT_DELAY, // time_valid < now (already valid), so no sleep occurs - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; test.assert_task_iteration(Some(1u64)); } @@ -206,7 +220,7 @@ async fn delay_task_increasing_blocks_with_increasing_timestamps() { } let mut task = test.into_delay_task(); - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; test.assert_task_iteration(Some(3u64)); } @@ -227,7 +241,7 @@ async fn delay_task_increasing_blocks_with_decreasing_timestamps() { } let mut task = test.into_delay_task(); - Test::assert_task_made_progress(&mut task, true).await; + Test::assert_task_run_and_check_progress(&mut task, true).await; // All blocks should still be processed in order, ending with block 3 // Even though block 3 has an earlier timestamp, it processes after block 1 and 2 diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 56dce680c..57de29738 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -1,44 +1,34 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write as _, - time::Duration, -}; - use core::future::Future; - -use blake2::{Digest, Blake2b256}; -use k256::{ecdsa::SigningKey as Secp256k1SigningKey, elliptic_curve::sec1::ToEncodedPoint}; -use rand_core::{OsRng, RngCore}; -use schnorrkel::Keypair as Sr25519Keypair; - -use serai_db::{DbTxn, Db as _}; -use serai_task::ContinuallyRan; - -use serai_client_serai::abi::{ - primitives::{ - BlockHash, - address::{SeraiAddress, ExternalAddress}, - balance::{Amount, ExternalBalance}, - coin::ExternalCoin, - crypto::{Public, ExternalKey, KeyPair}, - instructions::{OutInstruction, OutInstructionWithBalance}, - network_id::{ExternalNetworkId, NetworkId}, - validator_sets::{Session, ValidatorSet, ExternalValidatorSet, KeyShares}, +use std::collections::HashMap; + +use serai_db::{Db as _, DbTxn}; + +use serai_client_serai::{ + abi::{ + primitives::{ + address::{SeraiAddress, ExternalAddress}, + balance::{Amount, ExternalBalance}, + coin::ExternalCoin, + crypto::{Public, ExternalKey, KeyPair}, + instructions::{OutInstruction, OutInstructionWithBalance}, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{Session, ValidatorSet, ExternalValidatorSet, KeyShares}, + BlockHash, + }, + coins, system, validator_sets, Block, Event, }, - coins, system, validator_sets, Event, + Events, }; use crate::{ intend::{ - BlockEventData, BlockEvents, CosignIntendTask, GlobalSessionsChannel, IntendedCosigns, - ScanCosignFrom, + BlockEventData, BlockEvents, BuildsUpon, CosignIntendTask, GlobalSessionsChannel, + IntendedCosigns, LatestSet, ScanCosignFrom, Set, Stakes, Validators, }, - SeraiRpc, tests::*, CosignIntent, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, HasEvents, - LatestGlobalSessionIntended, SubstrateBlockHash, + LatestGlobalSessionIntended, SeraiRpc, SubstrateBlockHash, }; -use serai_substrate_tests::{composition, rpc}; fn set_keys_event_with_pair(set: ExternalValidatorSet, key_pair: &KeyPair) -> Event { Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair: key_pair.clone() }) @@ -85,615 +75,797 @@ fn burn_with_instruction_event(from: SeraiAddress) -> Event { }) } -fn assert_global_session(actual: &GlobalSession, expected: &GlobalSession) { - assert_eq!(actual.start_block_number, expected.start_block_number); - assert_eq!(actual.sets, expected.sets); - assert_eq!(actual.keys, expected.keys); - assert_eq!(actual.stakes, expected.stakes); - assert_eq!(actual.total_stake, expected.total_stake); +fn events_from_allocations(allocations: &[(SeraiAddress, ExternalNetworkId, u64)]) -> Vec { + allocations + .iter() + .map(|(validator, network, amount)| { + allocation_event(*validator, NetworkId::External(*network), *amount) + }) + .collect() } -#[tokio::test] -async fn intend_returns_false_with_no_blocks() { - init_logger(); +impl Test { + fn assert_substrate_block_hash_exists(&self, block_number: u64) -> BlockHash { + let block_hash = SubstrateBlockHash::get(&self.db, block_number); + assert!(block_hash.is_some(), "no substrate blockhash for block {block_number}"); + block_hash.expect("no substrate blockhash") + } - let mut env = TestEnvironment::new(); + fn assert_builds_upon_is_expected(&self, expected: &IncrementalUnbalancedMerkleTree) { + assert_eq!(BuildsUpon::get(&self.db).as_ref(), Some(expected)); + } - let mut task = env.into_task(); - assert_eq!(task.run_iteration().await.unwrap(), false); -} + // Assert everything that changed or should have changed after a simple task iteration run with linear blocks + // (substrate block hashes are set and builds upon is expected) + fn assert_task_iteration_per_block(&self, block_number: u64) -> BlockHash { + let block_hash = self.assert_substrate_block_hash_exists(block_number); + self.assert_builds_upon_is_expected(&self.serai.builds_upon); + block_hash + } -#[tokio::test] -async fn intend_returns_true_with_linear_blocks() { - init_logger(); + fn assert_block_events_is_expected(&mut self, expected: BlockEventData) { + let mut txn = self.db.txn(); + let actual = BlockEvents::try_recv(&mut txn); + txn.commit(); + match actual { + Some(a) => { + assert_eq!(a.block_number, expected.block_number); + assert_eq!(a.has_events, expected.has_events); + } + None => panic!("BlockEvents mismatch: got None, expected {:?}", expected), + } + } - let mut env = TestEnvironment::new(); + fn assert_scan_cosign_from_is_expected(&self, expected: u64) { + assert_eq!(ScanCosignFrom::get(&self.db), Some(expected)); + } - let block1_hash = env.serai.make_block(1); - env.serai.new_events(block1_hash); + // Assert everything that changed or should have changed after task iteration is ran per block + // (BlockEventData points to current block and events, ScanCosignFrom is the next block) + fn assert_task_iteration_per_block_concluded( + &mut self, + block_number: u64, + has_events: HasEvents, + ) { + self.assert_block_events_is_expected(BlockEventData { block_number, has_events }); + self.assert_scan_cosign_from_is_expected(block_number + 1); + } - let mut task = env.into_task(); - // Returns true with one block - assert_eq!(task.run_iteration().await.unwrap(), true); + fn assert_task_iteration_per_block_with_no_events_ran(&mut self, block_number: u64) { + self.assert_task_iteration_per_block(block_number); + self.assert_task_iteration_per_block_concluded(block_number, HasEvents::No); + } - let block2_hash = env.serai.make_block(2); - env.serai.new_events(block2_hash); + fn assert_task_iterations_with_no_events_ran(&mut self, block_numbers: (u64, u64)) { + let start_block = block_numbers.0; + let end_block = block_numbers.1; - let block3_hash = env.serai.make_block(3); - env.serai.new_events(block3_hash); + for block_number in start_block..=end_block { + self.assert_task_iteration_per_block(block_number); + self.assert_block_events_is_expected(BlockEventData { + block_number, + has_events: HasEvents::No, + }); + } - let mut task = env.into_task(); - // Returns true with sequence of blocks - assert_eq!(task.run_iteration().await.unwrap(), true); -} + self.assert_scan_cosign_from_is_expected(end_block + 1); + } -#[tokio::test] -async fn intend_errors_if_chain_is_not_linear() { - init_logger(); + /// Asserts that block 1 was processed successfully but block 2 failed. + /// Takes the expected `builds_upon` value (state after block 1 was processed). + fn assert_block_1_succeeded_block_2_failed( + &self, + expected_builds_upon: &IncrementalUnbalancedMerkleTree, + ) { + let getter = &self.db; - let mut env = TestEnvironment::new(); + assert!(SubstrateBlockHash::get(getter, 1).is_some()); + assert!(SubstrateBlockHash::get(getter, 2).is_none()); - let block1_hash = env.serai.make_block(1); - env.serai.new_events(block1_hash); + // BuildsUpon should reflect state after block 1 (before block 2 failed) + self.assert_builds_upon_is_expected(expected_builds_upon); - // Block #2 does not build upon block #1 - env.serai.builds_upon = IncrementalUnbalancedMerkleTree::new(); + assert_eq!(BlockEvents::peek(getter).expect("missing block events").block_number, 1); + // Next ScanCosignFrom is still block 2 since it failed and must be re-ran + assert_eq!(ScanCosignFrom::get(getter).expect("missing scan cosign from"), 2); + } + + fn assert_stakes_is_expected( + &self, + network: ExternalNetworkId, + validator: SeraiAddress, + expected: Option, + ) { + assert_eq!(Stakes::get(&self.db, network, validator), expected); + } - let block2_hash = env.serai.make_block(2); - env.serai.new_events(block2_hash); + /// Asserts stakes match the accumulated totals from a slice of allocations. + /// Groups by (network, validator) and sums amounts before asserting. + fn assert_stakes_from_allocations_is_expected( + &self, + allocations: &[(SeraiAddress, ExternalNetworkId, u64)], + ) { + let mut expected: HashMap<(ExternalNetworkId, SeraiAddress), u64> = HashMap::new(); + for (validator, network, amount) in allocations { + *expected.entry((*network, *validator)).or_default() += amount; + } + for ((network, validator), amount) in expected { + self.assert_stakes_is_expected(network, validator, Some(Amount(amount))); + } + } - let mut task = env.into_task(); - let err = task.run_iteration().await.unwrap_err(); - assert!(err.contains("doesn't build upon"), "{err}"); -} + fn assert_global_session(actual: &GlobalSession, expected: &GlobalSession) { + assert_eq!(actual.start_block_number, expected.start_block_number); + assert_eq!(actual.sets, expected.sets); + assert_eq!(actual.keys, expected.keys); + assert_eq!(actual.stakes, expected.stakes); + assert_eq!(actual.total_stake, expected.total_stake); + } -#[tokio::test] -async fn intend_errors_if_block_not_found() { - init_logger(); + fn assert_validators_is_expected( + &self, + set: ExternalValidatorSet, + expected: Option>, + ) { + assert_eq!(Validators::get(&self.db, set), expected); + } + + fn assert_latest_set_is_expected(&self, network: ExternalNetworkId, expected: Option<&Set>) { + let actual = LatestSet::get(&self.db, network); + match (actual.as_ref(), expected) { + (Some(a), Some(e)) => { + assert_eq!(a.session, e.session); + assert_eq!(a.key, e.key); + assert_eq!(a.stake, e.stake); + } + (None, None) => {} + _ => panic!("LatestSet mismatch for {:?}", network), + } + } - let mut env = TestEnvironment::new(); + #[allow(dead_code)] + fn assert_global_sessions_get(&self, session_id: [u8; 32], expected: Option<&GlobalSession>) { + match (GlobalSessions::get(&self.db, session_id), expected) { + (Some(ref actual), Some(exp)) => Self::assert_global_session(actual, exp), + (None, None) => {} + (actual, exp) => { + panic!("GlobalSessions mismatch: got {:?}, expected {:?}", actual.is_some(), exp.is_some()) + } + } + } - // Make block 1 exist in terms of finalization, but return None when fetched - let _block1_hash = env.serai.make_block(1); - env.serai.set_block_not_found(1); + fn assert_global_sessions_last_block(&self, session_id: [u8; 32], expected: u64) { + assert_eq!(GlobalSessionsLastBlock::get(&self.db, session_id), Some(expected)); + } - let mut task = env.into_task(); - let err = task.run_iteration().await.unwrap_err(); - assert!( - err.contains("couldn't get block which should've been finalized"), - "unexpected error: {err}" - ); + #[allow(dead_code)] + fn assert_latest_global_session_intended(&self, expected: Option<[u8; 32]>) { + assert_eq!(LatestGlobalSessionIntended::get(&self.db), expected); + } + + #[allow(dead_code)] + fn assert_global_sessions_channel_peek(&self, expected: Option<&([u8; 32], GlobalSession)>) { + let actual = GlobalSessionsChannel::peek(&self.db); + match (actual.as_ref(), expected) { + (Some((aid, asess)), Some((eid, esess))) => { + assert_eq!(aid, eid); + Self::assert_global_session(asess, esess); + } + (None, None) => {} + _ => panic!( + "GlobalSessionsChannel mismatch: got {:?}, expected {:?}", + actual.is_some(), + expected.is_some() + ), + } + } + + fn assert_intended_cosigns_peek(&self, set: ExternalValidatorSet, expected: CosignIntent) { + assert_eq!(IntendedCosigns::peek(&self.db, set), Some(expected)); + } + + /// Asserts that a notable block was processed correctly, verifying: + /// - Substrate block hash and builds_upon are set + /// - BlockEvents has the correct event type: + /// - HasEvents::No for the first notable block (no prior session to cosign it) + /// - HasEvents::Notable for subsequent notable blocks + /// - A new GlobalSession was created and stored + /// - GlobalSessionsLastBlock is set for the previous session (if one existed) + /// - LatestGlobalSessionIntended is updated to the new session + /// - GlobalSessionsChannel received the new session + /// - IntendedCosigns are sent for the previous session's sets (if one existed) + /// + /// Returns the new session ID and session info for further assertions if needed. + fn assert_task_iteration_per_block_with_notable_events_ran( + &mut self, + block_number: u64, + previous_session_id: Option<[u8; 32]>, + ) -> ([u8; 32], GlobalSession) { + let block_hash = self.assert_task_iteration_per_block(block_number); + + // First notable block has no prior session to cosign it, so it's treated as No + // Subsequent notable blocks have a prior session, so they're treated as Notable + let expected_has_events = + if previous_session_id.is_some() { HasEvents::Notable } else { HasEvents::No }; + self.assert_block_events_is_expected(BlockEventData { + block_number, + has_events: expected_has_events, + }); + + // Get session from channel (channels preserve order, so this gives us the session for this block) + let mut txn = self.db.txn(); + let channel_entry = GlobalSessionsChannel::try_recv(&mut txn); + txn.commit(); + + let (session_id, session) = channel_entry.unwrap_or_else(|| { + panic!("GlobalSessionsChannel was empty, expected session for block {block_number}") + }); + + let stored_session = GlobalSessions::get(&self.db, session_id) + .expect("GlobalSessions should contain the session after notable block"); + Self::assert_global_session(&session, &stored_session); + + assert_eq!( + session.start_block_number, + block_number + 1, + "session should start at block after the notable block" + ); + + assert!(session.total_stake > 0, "session should have non-zero total stake"); + + // GlobalSessionsLastBlock is set for the previous session when a new session starts + if let Some(prev_id) = previous_session_id { + self.assert_global_sessions_last_block(prev_id, block_number); + } + + // IntendedCosigns are sent for the previous session's sets + if let Some(prev_id) = previous_session_id { + let prev_session = + GlobalSessions::get(&self.db, prev_id).expect("previous session should exist"); + for set in prev_session.sets { + self.assert_intended_cosigns_peek( + set, + CosignIntent { global_session: prev_id, block_number, block_hash, notable: true }, + ); + } + } + + (session_id, session) + } + + /// Asserts that a non-notable block (e.g., with burn events) was processed correctly, verifying: + /// - Substrate block hash and builds_upon are set + /// - BlockEvents has NonNotable for this block + /// - IntendedCosigns are sent for the active session's sets (with notable=false) + /// - ScanCosignFrom is set to the next block + fn assert_task_iteration_per_block_with_non_notable_events_ran(&mut self, block_number: u64) { + let block_hash = self.assert_task_iteration_per_block(block_number); + self.assert_task_iteration_per_block_concluded(block_number, HasEvents::NonNotable); + + let active_session_id = LatestGlobalSessionIntended::get(&self.db) + .expect("NonNotable block requires an active session from a prior notable block"); + + // IntendedCosigns are sent for the active session's sets with notable=false + let session = + GlobalSessions::get(&self.db, active_session_id).expect("active session should exist"); + for set in session.sets { + self.assert_intended_cosigns_peek( + set, + CosignIntent { + global_session: active_session_id, + block_number, + block_hash, + notable: false, + }, + ); + } + } } #[tokio::test] -async fn intend_handles_blocks_with_no_events() { - init_logger(); +async fn intend_returns_false_with_no_blocks() { + let test = Test::new(); + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, false).await; +} - let mut env = TestEnvironment::new(); +#[tokio::test] +async fn intend_returns_false_with_genesis_block() { + let mut test = Test::new(); - let block1_hash = env.serai.make_block(1); - env.serai.new_events(block1_hash); + let genesis_hash = test.serai.make_block(0); + test.serai.initialize_empty_events(genesis_hash); - let mut task = env.into_task(); + let mut task = test.into_intend_task(); - task.run_iteration().await; + // In intend.rs let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); + // will default to the 1st block, and without a greater serai.latest_finalized_block_number() + // there will nothing to iterate, returning false as in "did not progress" + Test::assert_task_run_and_check_progress(&mut task, false).await; +} - let mut txn = env.db.txn(); +#[tokio::test] +async fn intend_returns_true_with_one_block() { + let mut test = Test::new(); - let block = BlockEvents::try_recv(&mut txn).unwrap(); - assert_eq!(block.block_number, 1); - assert!(matches!(block.has_events, HasEvents::No)); + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); - txn.commit(); + let mut task = test.into_intend_task(); + + // Should return true as in "did progress the new block" + Test::assert_task_run_and_check_progress(&mut task, true).await; + + test.assert_task_iteration_per_block_with_no_events_ran(1); } #[tokio::test] -async fn intend_errors_if_notable_block_has_no_stake() { - init_logger(); +async fn intend_returns_true_with_linear_blocks_with_no_events() { + let mut test = Test::new(); - let mut env = TestEnvironment::new(); + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); - let validator = SeraiAddress([7u8; 32]); + let block2_hash = test.serai.make_block(2); + test.serai.initialize_empty_events(block2_hash); - let block1_hash = env.serai.make_block(1); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let block3_hash = test.serai.make_block(3); + test.serai.initialize_empty_events(block3_hash); - env - .serai - .set_events(block1_hash, vec![set_decided_event(vset0, validator), set_keys_event(set0, 1)]); + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - let mut task = env.into_task(); - let err = task.run_iteration().await.unwrap_err(); - assert!(err.contains("had 0 stake"), "{err}"); + test.assert_task_iterations_with_no_events_ran((1, 3)); } #[tokio::test] -async fn intend_task_indexes_blocks_and_emits_events_and_intents() { - init_logger(); +async fn intend_errors_if_chain_is_not_linear() { + let mut test = Test::new(); - let mut env = TestEnvironment::new(); + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); - let validator = SeraiAddress([7u8; 32]); + // Capture builds_upon after block 1 (before block 2 modifies it) + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + // Block #2 does not build upon block #1 + test.serai.builds_upon = IncrementalUnbalancedMerkleTree::new(); - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + let block2_hash = test.serai.make_block(2); + test.serai.initialize_empty_events(block2_hash); - let block1_hash = env.serai.make_block(1); - env.serai.set_events( - block1_hash, - vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), - set_decided_event(vset0, validator), - set_keys_event(set0, 1), - ], - ); + let mut task = test.into_intend_task(); + Test::assert_task_failed(&mut task, "doesn't build upon").await; - let block2_hash = env.serai.make_block(2); - env - .serai - .set_events(block2_hash, vec![set_decided_event(vset1, validator), set_keys_event(set1, 2)]); + test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); +} - let mut task = env.into_task(); - assert_eq!(task.run_iteration().await.unwrap(), true); +#[tokio::test] +async fn intend_errors_if_block_not_found() { + let mut test = Test::new(); - assert_eq!(ScanCosignFrom::get(&env.db), Some(3u64)); + // Block 1 exists and can be fetched + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); - let mut txn = env.db.txn(); + // Capture builds_upon after block 1 (before block 2 modifies it) + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - // BlockEvents: block 1 is notable but has no prior global session, so it's treated as `No`. + // Block 2 exists in terms of finalization, but returns None when fetched + test.serai.make_block(2); + test.serai.set_block_not_found(2); - let first = BlockEvents::try_recv(&mut txn).expect("expected block 1 event"); - assert_eq!(first.block_number, 1); - assert!(matches!(first.has_events, HasEvents::No)); + let mut task = test.into_intend_task(); + Test::assert_task_failed(&mut task, "couldn't get block which should've been finalized").await; - let second = BlockEvents::try_recv(&mut txn).expect("expected block 2 event"); - assert_eq!(second.block_number, 2); - assert!(matches!(second.has_events, HasEvents::Notable)); + test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); +} - assert!(BlockEvents::try_recv(&mut txn).is_none()); +#[tokio::test] +async fn intend_handles_rpc_error_on_block_fetch() { + let mut test = Test::new(); + + // Block 1 exists and can be fetched + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); + + // Capture builds_upon after block 1 (before block 2 modifies it) + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - // Global sessions were created at both notable blocks. - let (session1_id, session1) = { - let first = GlobalSessionsChannel::try_recv(&mut txn).expect("expected first global session"); - first - }; - assert_eq!(session1.start_block_number, 2); - assert_eq!(session1.sets, vec![set0]); + // Block 2 exists in terms of finalization, but fetching it returns an error + test.serai.make_block(2); + test.serai.set_block_error(2, "connection refused"); - let (session2_id, session2) = { - let second = GlobalSessionsChannel::try_recv(&mut txn).expect("expected second global session"); - second - }; - assert_ne!(session1_id, session2_id); - assert_eq!(session2.start_block_number, 3); - assert_eq!(session2.sets, vec![set1]); + let mut task = test.into_intend_task(); + Test::assert_task_failed(&mut task, "RPC error fetching block").await; - // Block 2 should be intended for cosigning by the prior global session. + test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); +} + +#[tokio::test] +async fn intend_handles_rpc_error_on_events_fetch() { + let mut test = Test::new(); - let intent = IntendedCosigns::try_recv(&mut txn, set0).expect("expected cosign intent for set0"); + // Block 1 exists and can be fetched + let block1_hash = test.serai.make_block(1); + test.serai.initialize_empty_events(block1_hash); - assert_eq!(intent.global_session, session1_id); - assert_eq!(intent.block_number, 2); - assert_eq!(intent.block_hash, block2_hash); - assert!(intent.notable); + // Capture builds_upon after block 1 (before block 2 modifies it) + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - // No additional intents should exist. + // Block 2 exists in terms of finalization, but fetching it returns an error + let block2_hash = test.serai.make_block(2); + test.serai.set_events_error(block2_hash, "timeout"); - assert!(IntendedCosigns::try_recv(&mut txn, set0).is_none()); + let mut task = test.into_intend_task(); + Test::assert_task_failed(&mut task, "RPC error fetching events").await; - txn.commit(); + test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); } #[tokio::test] -async fn intend_emits_non_notable_for_burn_with_instruction() { - init_logger(); +async fn intend_handles_rpc_error_on_latest_finalized() { + let mut test = Test::new(); + + // We need to add a block first so latest_finalized_block_number would normally succeed + test.serai.make_block(1); + + // Create a wrapper that returns error for latest_finalized_block_number + #[derive(Clone)] + struct FailingSeraiRPC; + impl SeraiRpc for FailingSeraiRPC { + fn latest_finalized_block_number(&self) -> impl Send + Future> { + async { Err("network error".to_string()) } + } + fn block_by_number( + &self, + _block: u64, + ) -> impl Send + Future, String>> { + async { Ok(None) } + } + fn events(&self, _block: BlockHash) -> impl Send + Future> { + async { Ok(Events::new()) } + } + } - let mut env = TestEnvironment::new(); + // Create a custom Serai that will fail on latest_finalized_block_number + let mut task = CosignIntendTask { db: test.db.clone(), serai: FailingSeraiRPC }; + Test::assert_task_failed(&mut task, "RPC error fetching latest finalized").await; +} - let validator = SeraiAddress([7u8; 32]); +#[tokio::test] +async fn intend_handles_allocation_events() { + let mut test = Test::new(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + // Block 1: Allocations across multiple networks + let allocations_block1 = [ + (validator1, ExternalNetworkId::Bitcoin, 50), + (validator1, ExternalNetworkId::Bitcoin, 100), + (validator2, ExternalNetworkId::Bitcoin, 200), + (validator1, ExternalNetworkId::Ethereum, 150), + ]; + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, events_from_allocations(&allocations_block1)); + + // Block 2: More allocations + let allocations_block2 = + [(validator2, ExternalNetworkId::Ethereum, 75), (validator1, ExternalNetworkId::Bitcoin, 25)]; + let block2_hash = test.serai.make_block(2); + test.serai.set_events(block2_hash, events_from_allocations(&allocations_block2)); + + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; + + let all_allocations: Vec<_> = + allocations_block1.iter().chain(allocations_block2.iter()).copied().collect(); + test.assert_stakes_from_allocations_is_expected(&all_allocations); + + // Both blocks have only allocation events (no SetKeys, no burn) -> HasEvents::No + test.assert_task_iterations_with_no_events_ran((1, 2)); +} - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; +#[tokio::test] +async fn intend_handles_deallocation_event() { + let mut test = Test::new(); + + let validator = SeraiAddress([0x01; 32]); - let block1_hash = env.serai.make_block(1); - env.serai.set_events( + // Block 1: Allocate then deallocate some + let block1_hash = test.serai.make_block(1); + test.serai.set_events( block1_hash, vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), - set_decided_event(vset0, validator), - set_keys_event(set0, 1), + // Allocate first + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + // Deallocate some + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), ], ); - let block2_hash = env.serai.make_block(2); - env.serai.set_events(block2_hash, vec![burn_with_instruction_event(validator)]); + // Block 2: Deallocate more than remaining to test saturation + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![ + // Deallocate more than remaining (70 left, deallocating 100) + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + ], + ); + + // Create task after all blocks are set up + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - let mut task = env.into_task(); - assert_eq!(task.run_iteration().await.unwrap(), true); + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(0))); - let mut txn = env.db.txn(); - let _b1 = BlockEvents::try_recv(&mut txn).unwrap(); - let b2 = BlockEvents::try_recv(&mut txn).unwrap(); - assert_eq!(b2.block_number, 2); - assert!(matches!(b2.has_events, HasEvents::NonNotable)); - txn.commit(); + // Both blocks have only allocation/deallocation events (no SetKeys, no burn) -> HasEvents::No + test.assert_task_iterations_with_no_events_ran((1, 2)); } #[tokio::test] -async fn intend_updates_stakes_and_ignores_non_external_network_events() { - init_logger(); - - let mut env = TestEnvironment::new(); - - let validator = SeraiAddress([7u8; 32]); +async fn intend_errors_if_notable_block_has_no_stake() { + let mut test = Test::new(); - let block1_hash = env.serai.make_block(1); + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; let vset0 = ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(9) }; + let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; - env.serai.set_events( + // Block 1: Normal notable block with allocations + let block1_hash = test.serai.make_block(1); + test.serai.set_events( block1_hash, vec![ - // Non-ValidatorSets event to exercise the `continue` branch in stake processing. - Event::System(system::Event::TransactionSuccess), - // Stakes for the external network. - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 10), - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 3), - // These are ignored as they're not external networks. - allocation_event(validator, NetworkId::Serai, 999), - deallocation_event(validator, NetworkId::Serai, 999), - // Decide the external set. - set_decided_event(vset0, validator), - // This SetDecided can't be converted to an external set and is ignored. - Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset_serai }), - Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset_serai, - validators: vec![], - }), - // Set keys for the external set. + allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + set_decided_event(vset0, validator1), set_keys_event(set0, 1), ], ); - let mut task = env.into_task(); - assert_eq!(task.run_iteration().await.unwrap(), true); + // Block 2: SetDecided and SetKeys for new session with validator2 who has no allocations -> 0 stake + let block2_hash = test.serai.make_block(2); + test + .serai + .set_events(block2_hash, vec![set_decided_event(vset1, validator2), set_keys_event(set1, 2)]); - let (_id, info) = { - let mut txn = env.db.txn(); - let res = GlobalSessionsChannel::try_recv(&mut txn).expect("expected a global session"); - txn.commit(); - res - }; - assert_eq!(info.start_block_number, 2); - assert_eq!(info.stakes.get(&ExternalNetworkId::Bitcoin), Some(&7)); - assert_eq!(info.total_stake, 7); + let mut task = test.into_intend_task(); + Test::assert_task_failed(&mut task, "had 0 stake").await; } -fn random_serai_address(rng: &mut OsRng) -> SeraiAddress { - SeraiAddress(Sr25519Keypair::generate_with(&mut *rng).public.to_bytes()) -} +#[tokio::test] +async fn intend_handles_set_decided_event() { + let mut test = Test::new(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + let validator3 = SeraiAddress([0x03; 32]); + + let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0_btc = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let vset0_eth = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + + // Block 1: SetDecided for Bitcoin + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset0_btc, + validators: vec![ + (validator1, KeyShares(1)), + (validator2, KeyShares(2)), + (validator3, KeyShares(3)), + ], + })], + ); + + // Block 2: SetDecided for Ethereum with different validators + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset0_eth, + validators: vec![(validator1, KeyShares(2)), (validator2, KeyShares(3))], + })], + ); + + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; -fn random_crypto_key_pair(rng: &mut OsRng) -> KeyPair { - let substrate_public = Sr25519Keypair::generate_with(&mut *rng).public.to_bytes(); - let external_point = - Secp256k1SigningKey::random(&mut *rng).verifying_key().to_encoded_point(true); - let external_key = - ExternalKey(external_point.as_bytes().to_vec().try_into().expect("compressed key fits")); - KeyPair(Public(substrate_public), external_key) + // Verify validators are stored for each set + test.assert_validators_is_expected(set0_btc, Some(vec![validator1, validator2, validator3])); + test.assert_validators_is_expected(set0_eth, Some(vec![validator1, validator2])); + + // SetDecided alone doesn't make a block notable (only SetKeys does) -> HasEvents::No + test.assert_task_iterations_with_no_events_ran((1, 2)); } #[tokio::test] -async fn intend_fuzz_test() { - init_logger(); - - let mut rng = &mut OsRng; - - for iteration in 0..3 { - let network = ExternalNetworkId::Bitcoin; - let validators: Vec = (0..3).map(|_| random_serai_address(rng)).collect(); - - let mut serai = Serai::default(); - - let mut stake_tracker: HashMap = HashMap::new(); - let mut next_session: u32 = 0; - let mut stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64> = HashMap::new(); - let mut decided_validators: HashMap> = HashMap::new(); - let mut latest_set: HashMap = HashMap::new(); - - let mut expected_block_events = vec![]; - let mut expected_sessions: Vec<([u8; 32], GlobalSession)> = vec![]; - let mut expected_session_last_block: HashMap<[u8; 32], u64> = HashMap::new(); - let mut expected_latest_global_session: Option<[u8; 32]> = None; - let mut expected_intents: HashMap> = HashMap::new(); - - let block_count = 8 + (rng.next_u32() % 5) as u64; - let forced_new_session_block = block_count / 2; - let forced_burn_after_new_session_block = forced_new_session_block + 1; - - for block_number in 1..=block_count { - let mut planned_stakes = stake_tracker.clone(); - let mut tx_events = vec![]; - - let adjustments = usize::try_from(rng.next_u32() % 3).unwrap_or(0); - for _ in 0..adjustments { - let should_allocate = - (rng.next_u32() % 2 == 0) || planned_stakes.values().all(|stake| *stake == 0); - - if should_allocate { - let validator = validators[(rng.next_u32() as usize) % validators.len()]; - let amount = (rng.next_u64() % 10) + 1; - *planned_stakes.entry(validator).or_default() += amount; - - tx_events.push(allocation_event(validator, NetworkId::External(network), amount)); - } else { - let available: Vec<_> = validators - .iter() - .copied() - .filter(|validator| planned_stakes.get(validator).copied().unwrap_or(0) > 0) - .collect(); - if let Some(validator) = available.get((rng.next_u32() as usize) % available.len()) { - let validator = *validator; - let current = planned_stakes[&validator]; - let amount = (rng.next_u64() % current).saturating_add(1); - planned_stakes.insert(validator, current - amount); - - tx_events.push(deallocation_event(validator, NetworkId::External(network), amount)); - } - } - } +async fn intend_handles_set_keys_event() { + let mut test = Test::new(); - let include_burn = block_number != 1 - && ((block_number == 2) - || (block_number == forced_burn_after_new_session_block) - || (rng.next_u32() % 3 == 0)); - - let is_initial_session = block_number == 1; // Session 0 - let is_random_session = (block_number > 2 && (rng.next_u32() % 10 == 0)); // 10% chance - let will_create_new_session = - (block_number == forced_new_session_block) || is_initial_session || is_random_session; - - if will_create_new_session { - let validator = { - let mut available = validators - .iter() - .copied() - .filter(|validator| planned_stakes.get(validator).copied().unwrap_or(0) > 0) - .collect::>(); - - if available.is_empty() { - let validator = validators[(rng.next_u32() as usize) % validators.len()]; - let top_up = (rng.next_u64() % 10) + 1; - - *planned_stakes.entry(validator).or_default() += top_up; - - tx_events.push(allocation_event(validator, NetworkId::External(network), top_up)); - validator - } else { - available[(rng.next_u32() as usize) % available.len()] - } - }; - - let session = Session(next_session.try_into().unwrap()); - let set = ExternalValidatorSet { network, session }; - let vset = ValidatorSet { network: NetworkId::External(network), session }; - - tx_events.push(set_decided_event(vset, validator)); - let key_pair = random_crypto_key_pair(rng); - - tx_events.push(set_keys_event_with_pair(set, &key_pair)); - next_session = next_session.saturating_add(1); - } + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); - if include_burn { - let burn_from = validators[(rng.next_u32() as usize) % validators.len()]; + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; - tx_events.push(burn_with_instruction_event(burn_from)); - } + // Block 1: First SetKeys (creates session 0) + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + allocation_event(validator2, NetworkId::External(ExternalNetworkId::Bitcoin), 200), + Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset0, + validators: vec![(validator1, KeyShares(1)), (validator2, KeyShares(2))], + }), + set_keys_event(set0, 1), + ], + ); - let block_hash = serai.make_block(block_number); - - serai.set_events(block_hash, tx_events.clone()); - - let mut has_set_keys = false; - let mut has_burn = false; - - for event in tx_events { - match event { - Event::ValidatorSets(validator_sets::Event::Allocation { - validator, - network, - amount, - }) => { - let Ok(network) = ExternalNetworkId::try_from(network) else { continue }; - let key = (network, validator); - *stakes.entry(key).or_default() += amount.0; - } - Event::ValidatorSets(validator_sets::Event::Deallocation { - validator, - network, - amount, - timeline: _, - }) => { - let Ok(network) = ExternalNetworkId::try_from(network) else { continue }; - let key = (network, validator); - let stake = stakes.get_mut(&key).expect("deallocating missing stake"); - assert!(*stake >= amount.0, "deallocation underflow in expected model"); - *stake -= amount.0; - } - Event::ValidatorSets(validator_sets::Event::SetDecided { - set, - validators: event_validators, - }) => { - let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; - decided_validators - .insert(set, event_validators.iter().map(|(validator, _)| *validator).collect()); - } - Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair }) => { - has_set_keys = true; - let validators = - decided_validators.remove(&set).expect("set which wasn't decided set keys"); - let mut total_stake = 0; - for validator in validators { - total_stake += stakes.get(&(set.network, validator)).copied().unwrap_or(0); - } - latest_set.insert(set.network, (set.session, key_pair.0, total_stake)); - } - Event::Coins(coins::Event::BurnWithInstruction { .. }) => has_burn = true, - _ => {} - } - } + // Block 2: Second SetKeys (creates session 1) + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![ + Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset1, + validators: vec![(validator1, KeyShares(2)), (validator2, KeyShares(1))], + }), + set_keys_event(set1, 2), + ], + ); - let mut has_events = if has_set_keys { - HasEvents::Notable - } else if has_burn { - HasEvents::NonNotable - } else { - HasEvents::No - }; - - let global_session_for_this_block = expected_latest_global_session; - - if has_events == HasEvents::Notable { - let mut sets = vec![]; - let mut keys = HashMap::new(); - let mut session_stakes = HashMap::new(); - let mut total_stake = 0; - - for network in ExternalNetworkId::all() { - if let Some((session, key, stake)) = latest_set.get(&network).copied() { - let set = ExternalValidatorSet { network, session }; - sets.push(set); - keys.insert(network, key); - session_stakes.insert(network, stake); - total_stake += stake; - } - } - - assert!(total_stake > 0, "cosigning sets for block #{block_number} had 0 stake in total"); - - let global_session = GlobalSession { - start_block_number: block_number + 1, - sets: sets.clone(), - keys, - stakes: session_stakes, - total_stake, - }; - let session_id = GlobalSession::id(sets); - if let Some(existing) = global_session_for_this_block { - expected_session_last_block.insert(existing, block_number); - } - expected_latest_global_session = Some(session_id); - expected_sessions.push((session_id, global_session)); - } + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - if global_session_for_this_block.is_none() { - has_events = HasEvents::No; - } + let expected_set = Set { session: Session(1), key: Public([2u8; 32]), stake: Amount(300) }; + test.assert_latest_set_is_expected(ExternalNetworkId::Bitcoin, Some(&expected_set)); - if matches!(has_events, HasEvents::Notable | HasEvents::NonNotable) { - if let Some(global_session) = global_session_for_this_block { - let session = expected_sessions - .iter() - .find(|(session_id, _)| *session_id == global_session) - .map(|(_, session)| session) - .expect("global session missing from expected state"); - for set in &session.sets { - let intent = CosignIntent { - global_session, - block_number, - block_hash, - notable: has_events == HasEvents::Notable, - }; - expected_intents.entry(*set).or_default().push(intent); - } - } - } + test.assert_validators_is_expected(set0, None); + test.assert_validators_is_expected(set1, None); - expected_block_events.push(BlockEventData { block_number, has_events }); + // Block 1: First notable block (no prior session) -> HasEvents::No + let (session0_id, _) = test.assert_task_iteration_per_block_with_notable_events_ran(1, None); - stake_tracker = planned_stakes; - } + // Block 2: Second notable block (prior session exists) -> HasEvents::Notable + test.assert_task_iteration_per_block_with_notable_events_ran(2, Some(session0_id)); - let blocks_by_number = serai.blocks_by_number.clone(); + test.assert_scan_cosign_from_is_expected(3); +} - let mut env = TestEnvironment::from_serai(serai); +#[tokio::test] +async fn intend_handles_burn_with_instruction_events() { + let mut test = Test::new(); - let mut task = env.into_task(); + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); - task.run_iteration().await.unwrap(); + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let latest_block = block_count; + // Block 1: Create a session (first notable block, treated as No because no prior session) + let allocations_block1 = + [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; + let block1_hash = test.serai.make_block(1); + let mut events = events_from_allocations(&allocations_block1); + events.push(Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset0, + validators: vec![(validator1, KeyShares(1)), (validator2, KeyShares(2))], + })); + events.push(set_keys_event(set0, 1)); + test.serai.set_events(block1_hash, events); + + // Block 2: Burn event makes block NonNotable (with additional allocations) + let allocations_block2 = [(validator1, ExternalNetworkId::Bitcoin, 50)]; + let block2_hash = test.serai.make_block(2); + let mut events2 = events_from_allocations(&allocations_block2); + events2.push(burn_with_instruction_event(validator1)); + test.serai.set_events(block2_hash, events2); + + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; + + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator1, Some(Amount(150))); + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator2, Some(Amount(200))); + + // Block 1: First notable block (no prior session, treated as No) + test.assert_task_iteration_per_block(1); + test + .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + + // Block 2: NonNotable (has burn event, session exists from block 1) + test.assert_task_iteration_per_block_with_non_notable_events_ran(2); +} - let scan_cosign_from = ScanCosignFrom::get(&env.db); - assert_eq!(scan_cosign_from, Some(latest_block + 1)); +#[tokio::test] +async fn intend_ignores_non_validator_sets_events() { + let mut test = Test::new(); - let mut txn = env.db.txn(); + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; - let mut block_events = vec![]; - while let Some(event) = BlockEvents::try_recv(&mut txn) { - block_events.push(event); - } + // Block 1: System event (outer _ => continue) and AcceptedHandover (inner _ => continue) + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + Event::System(system::Event::TransactionSuccess), + Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset0 }), + ], + ); - assert_eq!(block_events.len(), expected_block_events.len()); + // Block 2: More ignored events on different network + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![ + Event::System(system::Event::TransactionSuccess), + Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset1 }), + ], + ); - for (idx, (actual, expected)) in block_events.iter().zip(&expected_block_events).enumerate() { - assert_eq!(actual.block_number, expected.block_number); - assert_eq!(actual.has_events, expected.has_events); - } + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - for (block_number, block) in &blocks_by_number { - let stored_hash = SubstrateBlockHash::get(&txn, *block_number); - let expected_hash = Some(block.header.hash()); - assert_eq!(stored_hash, expected_hash); - } + // Both blocks have only ignored events -> HasEvents::No + test.assert_task_iterations_with_no_events_ran((1, 2)); +} - let mut sessions_from_channel = vec![]; - while let Some(entry) = GlobalSessionsChannel::try_recv(&mut txn) { - sessions_from_channel.push(entry); - } +#[tokio::test] +async fn intend_ignores_serai_network_events() { + let mut test = Test::new(); - assert_eq!(sessions_from_channel.len(), expected_sessions.len()); - for (idx, ((actual_id, actual_session), (expected_id, expected_session))) in - sessions_from_channel.iter().zip(&expected_sessions).enumerate() - { - assert_eq!(actual_id, expected_id); - assert_global_session(actual_session, expected_session); - } + let validator = SeraiAddress([0x01; 32]); - for (session_id, expected_session) in &expected_sessions { - let stored = GlobalSessions::get(&txn, *session_id).expect("missing stored global session"); - assert_global_session(&stored, expected_session); - let expected_last_block = expected_session_last_block.get(session_id).copied(); - let stored_last_block = GlobalSessionsLastBlock::get(&txn, *session_id); - assert_eq!(stored_last_block, expected_last_block); - } - let latest_intended = LatestGlobalSessionIntended::get(&txn); - assert_eq!(latest_intended, expected_latest_global_session); + let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(0) }; + + // Block 1: Allocation and Deallocation with NetworkId::Serai + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + // Allocation with Serai network -> continue (line 154) + allocation_event(validator, NetworkId::Serai, 100), + // Deallocation with Serai network -> continue (line 159) + deallocation_event(validator, NetworkId::Serai, 50), + ], + ); - let all_sets: HashSet<_> = - expected_sessions.iter().flat_map(|(_, session)| session.sets.iter().copied()).collect(); + // Block 2: SetDecided with NetworkId::Serai + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![Event::ValidatorSets(validator_sets::Event::SetDecided { + set: vset_serai, + validators: vec![(validator, KeyShares(1))], + })], + ); - for set in all_sets { - let mut actual = vec![]; - while let Some(intent) = IntendedCosigns::try_recv(&mut txn, set) { - actual.push(intent); - } - let expected = expected_intents.get(&set).cloned().unwrap_or_default(); - assert_eq!(actual, expected, "intents mismatch for set {:?}", set); - } + let mut task = test.into_intend_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - txn.commit(); - } + // Verify no stakes were recorded for Serai network (allocations were ignored) + // Stakes::get only works with ExternalNetworkId, so we can't directly check Serai + // But we can verify the blocks were processed with no notable events + test.assert_task_iterations_with_no_events_ran((1, 2)); } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index eceddb9b2..f9632ffe9 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -1,29 +1,20 @@ #[cfg(test)] -mod delay; +mod intend; #[cfg(test)] -mod intend; +mod delay; use blake2::{Digest, Blake2b256}; +use serai_task::ContinuallyRan; use core::future::Future; use std::{ collections::{HashMap, HashSet}, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, OnceLock, - }, }; -use rand_core::{OsRng, RngCore}; - -use schnorrkel::{ExpansionMode, Keypair, MiniSecretKey}; - use serai_client_serai::{ abi::{ primitives::{ - crypto::Public, merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, - network_id::ExternalNetworkId, BlockHash, }, Block, Event, Header, HeaderV1, BLOCK_HEADER_BRANCH_TAG, BLOCK_HEADER_LEAF_TAG, @@ -32,58 +23,15 @@ use serai_client_serai::{ }; use crate::{ - SeraiRpc, - intend::{CosignIntendTask}, - COSIGN_CONTEXT, Cosign, SignedCosign, + COSIGN_CONTEXT, Cosign, SeraiRpc, SignedCosign, delay::CosignDelayTask, intend::CosignIntendTask, }; use serai_db::MemDb; -struct TestLogger; - -static LOG_ENABLED: AtomicBool = AtomicBool::new(true); - -impl log::Log for TestLogger { - fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { - LOG_ENABLED.load(Ordering::Relaxed) - } - - fn log(&self, _record: &log::Record<'_>) {} - - fn flush(&self) {} -} - -fn init_logger() { - static LOGGER: TestLogger = TestLogger; - static INIT: OnceLock<()> = OnceLock::new(); - INIT.get_or_init(|| { - let _ = log::set_logger(&LOGGER); - log::set_max_level(log::LevelFilter::Trace); - }); -} - -pub(crate) fn cosign_fixture(seed: [u8; 32], cosigner: ExternalNetworkId) -> Cosign { - let block_number = u64::from_le_bytes(seed[..8].try_into().unwrap()); - let block_hash = seed.map(|b| b ^ 0xAA); - - Cosign { global_session: seed, block_number, block_hash: BlockHash(block_hash), cosigner } -} - -pub(crate) fn keypair_from_seed(seed: [u8; 32]) -> Keypair { - MiniSecretKey::from_bytes(&seed) - .expect("test seeds should always create a keypair") - .expand_to_keypair(ExpansionMode::Uniform) -} - pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { - let mut seed = [0u8; 32]; - - loop { - OsRng.fill_bytes(&mut seed); - if let Ok(mini) = schnorrkel::MiniSecretKey::from_bytes(&seed) { - let keypair = mini.expand_to_keypair(schnorrkel::ExpansionMode::Ed25519); - break keypair; - } - } + // Use a fixed seed to ensure deterministic keypairs across test calls. + let seed = [42u8; 32]; + let mini = schnorrkel::MiniSecretKey::from_bytes(&seed).expect("fixed seed should be valid"); + mini.expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) } pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { @@ -91,21 +39,10 @@ pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> Sign SignedCosign { cosign, signature: sig.to_bytes() } } -pub(crate) fn signed_cosign_fixture( - seed: [u8; 32], - cosigner: ExternalNetworkId, -) -> (SignedCosign, Public) { - let cosign = cosign_fixture(seed, cosigner); - let keypair = keypair_from_seed(seed.map(|b| b ^ 0x55)); - let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()); - - (SignedCosign { cosign, signature: signature.to_bytes() }, Public(keypair.public.to_bytes())) -} - #[derive(Clone)] pub(crate) struct Serai { - pub(crate) block_by_number_error: Option, - pub(crate) events_error: Option, + pub(crate) block_by_number_error: HashMap, + pub(crate) events_error: HashMap, pub(crate) blocks_by_number: HashMap, pub(crate) events_by_hash: HashMap, pub(crate) builds_upon: IncrementalUnbalancedMerkleTree, @@ -115,8 +52,8 @@ pub(crate) struct Serai { impl Default for Serai { fn default() -> Self { Self { - block_by_number_error: None, - events_error: None, + block_by_number_error: HashMap::new(), + events_error: HashMap::new(), blocks_by_number: HashMap::new(), events_by_hash: HashMap::new(), builds_upon: IncrementalUnbalancedMerkleTree::new(), @@ -134,6 +71,14 @@ impl Serai { self.missing_blocks.insert(block_number); } + pub(crate) fn set_block_error(&mut self, block_number: u64, error: &str) { + self.block_by_number_error.insert(block_number, error.to_string()); + } + + pub(crate) fn set_events_error(&mut self, block_hash: BlockHash, error: &str) { + self.events_error.insert(block_hash, error.to_string()); + } + pub(crate) fn make_block(&mut self, number: u64) -> BlockHash { let block = Block { header: Header::V1(HeaderV1 { @@ -156,24 +101,18 @@ impl Serai { .into(), ); - if number > 0u64 { - self.blocks_by_number.insert(number, block); - } + self.blocks_by_number.insert(number, block); block_hash } - pub(crate) fn new_events(&mut self, block_hash: BlockHash) { + pub(crate) fn initialize_empty_events(&mut self, block_hash: BlockHash) { self.events_by_hash = HashMap::from([(block_hash, Events::new())]); } pub(crate) fn set_events(&mut self, block_hash: BlockHash, events: Vec) { self.events_by_hash.insert(block_hash, Events::with(events)); } - - pub(crate) fn builds_upon(&self) -> &IncrementalUnbalancedMerkleTree { - &self.builds_upon - } } impl SeraiRpc for Serai { @@ -186,7 +125,7 @@ impl SeraiRpc for Serai { &self, block: u64, ) -> impl Send + Future, String>> { - let err = self.block_by_number_error.clone(); + let err = self.block_by_number_error.get(&block).cloned(); let block_entry = self.blocks_by_number.get(&block).cloned(); let is_missing = self.missing_blocks.contains(&block); @@ -202,7 +141,7 @@ impl SeraiRpc for Serai { } fn events(&self, block: BlockHash) -> impl Send + Future> { - let err = self.events_error.clone(); + let err = self.events_error.get(&block).cloned(); let events = self.events_by_hash.get(&block).cloned().unwrap_or_default(); async move { if let Some(e) = err { @@ -213,27 +152,45 @@ impl SeraiRpc for Serai { } } -pub(crate) struct TestEnvironment { +pub(crate) struct Test { pub(crate) serai: Serai, pub(crate) db: MemDb, } -impl Default for TestEnvironment { +impl Default for Test { fn default() -> Self { Self { serai: Serai::new(), db: MemDb::new() } } } -impl TestEnvironment { +impl Test { pub(crate) fn new() -> Self { Self::default() } + #[allow(dead_code)] pub(crate) fn from_serai(serai: Serai) -> Self { Self { serai, db: MemDb::new() } } - pub(crate) fn into_task(&self) -> CosignIntendTask { + fn into_intend_task(&self) -> CosignIntendTask { CosignIntendTask { db: self.db.clone(), serai: self.serai.clone() } } + + fn into_delay_task(&self) -> CosignDelayTask { + CosignDelayTask { db: self.db.clone() } + } + + pub(crate) async fn assert_task_run_and_check_progress( + task: &mut impl ContinuallyRan, + made_progress: bool, + ) { + assert_eq!(task.run_iteration().await.unwrap(), made_progress); + } + + pub(crate) async fn assert_task_failed(task: &mut impl ContinuallyRan, error: &str) { + let err = task.run_iteration().await.unwrap_err(); + let err_str = format!("{err:?}"); + assert!(err_str.contains(error), "{err_str}"); + } } From ac32f6d81fd8085824c0a1958c546e568f9f14ea Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 26 Dec 2025 13:25:25 -0300 Subject: [PATCH 05/71] fix: the txn commit behavior on delay and even better tests --- coordinator/cosign/src/delay.rs | 19 +-- coordinator/cosign/src/lib.rs | 4 +- coordinator/cosign/src/tests/delay.rs | 179 ++++++++++++++------------ coordinator/cosign/src/tests/mod.rs | 12 +- 4 files changed, 114 insertions(+), 100 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 41fcf6fd8..476e9cbf6 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -34,29 +34,34 @@ impl ContinuallyRan for CosignDelayTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; + loop { let mut txn = self.db.txn(); + // Every loop iteration consumes a CosignedBlocks queue message let cosigned_block = CosignedBlocks::try_recv(&mut txn); - txn.commit(); let Some((block_number, time_evaluated)) = cosigned_block else { + txn.commit(); + // Stop when no blocks in queue break; }; if block_number == 0u64 { - return Ok(false); + txn.commit(); + continue; } // If we've already acknowledged a later block, consume and skip (don't wait). - let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); + let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); if block_number <= already_cosigned { - made_progress = true; + txn.commit(); continue; } // Calculate when we should mark it as valid, checking for overflow to avoid panic let time_evaluated_duration = Duration::from_secs(time_evaluated); let Some(time_valid) = time_evaluated_duration.checked_add(ACKNOWLEDGEMENT_DELAY) else { + txn.commit(); return Err(format!( "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY" )); @@ -66,15 +71,13 @@ impl ContinuallyRan for CosignDelayTask { // If the time valid is greater than the current time, // sleep until the time valid is reached if time_valid > now { - // Sleep until then (no transaction held during sleep) + // Sleep until then (no db transaction held during sleep) tokio::time::sleep(time_valid.saturating_sub(now)).await; } - // Atomically consume the message AND update the cosigned block number - let mut txn = self.db.txn(); LatestCosignedBlockNumber::set(&mut txn, &block_number); - txn.commit(); + txn.commit(); made_progress = true; } diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 0622333eb..e77edb73d 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -4,7 +4,9 @@ #![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] use core::{fmt::Debug, future::Future}; -use std::{collections::HashMap, time::Instant}; +use std::{collections::HashMap, sync::Arc, time::Instant}; + +use serai_client_serai::Serai; use blake2::{Digest as _, Blake2s256}; diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 01bbd4ecc..d7c3cc4cf 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,31 +1,48 @@ -use std::time::{Duration, SystemTime}; +use std::time::Duration; use crate::{ - LatestCosignedBlockNumber, delay::ACKNOWLEDGEMENT_DELAY, evaluator::CosignedBlocks, tests::Test, + LatestCosignedBlockNumber, + delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, + evaluator::CosignedBlocks, + tests::Test, }; +fn now_secs() -> u64 { + now_timestamp().as_secs() +} + use serai_db::{Db as _, DbTxn as _}; use serai_task::ContinuallyRan; -fn now_timestamp() -> u64 { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO).as_secs() -} - impl Test { + fn into_delay_task(&self) -> impl ContinuallyRan + 'static { + CosignDelayTask { db: self.db.clone() } + } + // Assert CosignedBlocks queue items have been consumed after task run - fn assert_queue_empty(&self) { + fn assert_queue_is_empty(&self) { assert!(CosignedBlocks::peek(&self.db).is_none(), "expected queue to be empty"); } + fn assert_queue_is_not_empty(&self) { + assert!(CosignedBlocks::peek(&self.db).is_some(), "expected queue to not be empty"); + } + // Assert LatestCosignedBlockNumber db points to latest block number after task run - fn assert_latest_cosigned_block_number(&self, block_number: Option) { + fn assert_latest_cosigned_block_number_is_expected(&self, block_number: Option) { assert_eq!(LatestCosignedBlockNumber::get(&self.db), block_number); } // Assert everything that changed or should have changed after a task iteration run - fn assert_task_iteration(&self, latest_cosigned_block_number: Option) { - self.assert_latest_cosigned_block_number(latest_cosigned_block_number); - self.assert_queue_empty(); + fn assert_task_iteration_returns(&self, latest_cosigned_block_number: Option) { + self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); + self.assert_queue_is_empty(); + } + + // Assert everything that changed or should have changed after a task iteration failure + fn assert_task_iteration_fails(&self, latest_cosigned_block_number: Option) { + self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); + self.assert_queue_is_not_empty(); } } @@ -41,11 +58,15 @@ async fn delay_task_returns_false_with_genesis_block() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, now_timestamp())); + CosignedBlocks::send(&mut txn, &(0u64, now_secs())); txn.commit(); } let mut task = test.into_delay_task(); + + // let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); + // the already_cosigned block number always defaults to 0, so "genesis" + // is always considered cosigned, made_progress returns false Test::assert_task_run_and_check_progress(&mut task, false).await; } @@ -55,71 +76,78 @@ async fn delay_task_updates_latest_cosigned_block_number() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now_timestamp())); + CosignedBlocks::send(&mut txn, &(0u64, now_secs())); + CosignedBlocks::send(&mut txn, &(1u64, now_secs())); + CosignedBlocks::send(&mut txn, &(2u64, now_secs())); txn.commit(); } let mut task = test.into_delay_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - - test.assert_task_iteration(Some(1u64)); + test.assert_task_iteration_returns(Some(2u64)); } #[tokio::test] -async fn delay_task_drains_multiple_messages_in_one_iteration() { +async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { let mut test = Test::new(); - let now = now_timestamp(); { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now)); - CosignedBlocks::send(&mut txn, &(2u64, now)); - CosignedBlocks::send(&mut txn, &(3u64, now)); + CosignedBlocks::send(&mut txn, &(1u64, now_secs())); + CosignedBlocks::send(&mut txn, &(2u64, now_secs())); + + // Sent out of order below + CosignedBlocks::send(&mut txn, &(4u64, now_secs())); + CosignedBlocks::send(&mut txn, &(3u64, now_secs())); txn.commit(); } let mut task = test.into_delay_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration(Some(3u64)); -} + // Queue order: 1, 2, 4, 3 + // Block 1, 2 and 4 processed, block 3 skipped (3 < 4) -#[tokio::test] -async fn delay_task_does_not_regress_and_skips_wait_for_stale_messages() { - let mut test = Test::new(); - let now = now_timestamp(); + // This won't actually happen but it needs to be tested that it does what it is + // meant to do, which is that if we've already acknowledged a later block, consume and skip + test.assert_task_iteration_returns(Some(4u64)); { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now)); - CosignedBlocks::send(&mut txn, &(2u64, now)); - CosignedBlocks::send(&mut txn, &(4u64, now)); + // Sends the same previous block number + CosignedBlocks::send(&mut txn, &(4u64, now_secs())); txn.commit(); } + let mut task = test.into_delay_task(); + // No progress following the previously set LatestCosignedBlockNumber was made, + // made_progress returns false + Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_task_iteration_returns(Some(4u64)); + { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(3u64, now)); + // Sends the same previous block number + CosignedBlocks::send(&mut txn, &(4u64, now_secs())); + // This time ensure progress is made beyond 4 + CosignedBlocks::send(&mut txn, &(5u64, now_secs())); txn.commit(); } let mut task = test.into_delay_task(); + // Had a duplicate, but made 1 block worth of progress + // made_progress returns true Test::assert_task_run_and_check_progress(&mut task, true).await; - - // Queue order: 1, 2, 4, 3 - // Block 1 processed (1 > 0), Block 2 processed (2 > 1), - // Block 4 processed (4 > 2), Block 3 skipped (3 <= 4) - test.assert_task_iteration(Some(4u64)); + test.assert_task_iteration_returns(Some(5u64)); } #[tokio::test] async fn delay_task_does_not_ack_before_acknowledgement_delay() { let mut test = Test::new(); - let now = now_timestamp(); { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now)); + CosignedBlocks::send(&mut txn, &(1u64, now_secs())); txn.commit(); } @@ -128,17 +156,22 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { // Give the task a moment to start and reach the sleep tokio::time::sleep(Duration::from_millis(50)).await; - test.assert_latest_cosigned_block_number(None); - // Sleep for most of (but not all) the acknowledgement delay - should still not be set + // Still nothing is returned + test.assert_latest_cosigned_block_number_is_expected(None); + + // Sleep for most of (but not all) the acknowledgement delay tokio::time::sleep(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(1)).await; - test.assert_latest_cosigned_block_number(None); - // Wait for the task to complete + // Still nothing is returned + test.assert_latest_cosigned_block_number_is_expected(None); + + // Wait for the task to actually complete let result = handle.await.unwrap(); assert_eq!(result, true); - test.assert_task_iteration(Some(1u64)); + // Now has a result + test.assert_task_iteration_returns(Some(1u64)); } #[tokio::test] @@ -152,12 +185,13 @@ async fn delay_task_with_zero_timestamp_processes_immediately() { } let mut task = test.into_delay_task(); + // This should complete immediately without sleeping - // Since now > 0 + ACKNOWLEDGEMENT_DELAY, - // time_valid < now (already valid), so no sleep occurs + // Since 0 as timestamp will always be an older date than the current time as timestamp + // and since the ACK time is considered to be passed, there is no sleep time to do Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration(Some(1u64)); + test.assert_task_iteration_returns(Some(1u64)); } #[tokio::test] @@ -166,20 +200,20 @@ async fn delay_task_with_max_timestamp_returns_error() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, u64::MAX)); + CosignedBlocks::send(&mut txn, &(0u64, now_secs())); + CosignedBlocks::send(&mut txn, &(1u64, now_secs())); + CosignedBlocks::send(&mut txn, &(2u64, u64::MAX)); + CosignedBlocks::send(&mut txn, &(3u64, now_secs())); txn.commit(); } let mut task = test.into_delay_task(); - let result = task.run_iteration().await; // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow // The task should return an error instead of panicking - assert!(result.is_err()); - assert!(result.unwrap_err().contains("overflow")); + Test::assert_task_failed(&mut task, "overflow").await; - // The block should not have been acknowledged - test.assert_task_iteration(None); + test.assert_task_iteration_fails(Some(1u64)); } #[tokio::test] @@ -187,11 +221,18 @@ async fn delay_task_with_far_future_timestamp_hangs() { // A timestamp far in the future (but not MAX to avoid overflow) // will cause the task to sleep for an extremely long time let mut test = Test::new(); - let far_future = now_timestamp() + 1_000_000; { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, far_future)); + // Use timestamp 0 for blocks 0 and 1 so they process immediately + // (time_valid = 0 + ACKNOWLEDGEMENT_DELAY is already in the past) + CosignedBlocks::send(&mut txn, &(0u64, 0u64)); + CosignedBlocks::send(&mut txn, &(1u64, 0u64)); + + let far_future = now_secs() + 1_000_000; + CosignedBlocks::send(&mut txn, &(2u64, far_future)); + + CosignedBlocks::send(&mut txn, &(3u64, 0u64)); txn.commit(); } @@ -202,35 +243,13 @@ async fn delay_task_with_far_future_timestamp_hangs() { assert!(result.is_err(), "Expected timeout, but task completed"); - // The block should not have been acknowledged since we timed out - test.assert_task_iteration(None); -} - -#[tokio::test] -async fn delay_task_increasing_blocks_with_increasing_timestamps() { - let mut test = Test::new(); - let base_time = now_timestamp(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, base_time)); - CosignedBlocks::send(&mut txn, &(2u64, base_time + 1)); - CosignedBlocks::send(&mut txn, &(3u64, base_time + 2)); - txn.commit(); - } - - let mut task = test.into_delay_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; - - test.assert_task_iteration(Some(3u64)); + test.assert_task_iteration_fails(Some(1u64)); } #[tokio::test] async fn delay_task_increasing_blocks_with_decreasing_timestamps() { - // This simulates a scenario where later blocks were evaluated earlier - // (e.g., due to clock skew) let mut test = Test::new(); - let base_time = now_timestamp(); + let base_time = now_secs(); { let mut txn = test.db.txn(); @@ -243,7 +262,7 @@ async fn delay_task_increasing_blocks_with_decreasing_timestamps() { let mut task = test.into_delay_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - // All blocks should still be processed in order, ending with block 3 - // Even though block 3 has an earlier timestamp, it processes after block 1 and 2 - test.assert_task_iteration(Some(3u64)); + // nothing unusual happens, the task follow block numbers + // timestamps could be out of order + test.assert_task_iteration_returns(Some(3u64)); } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index f9632ffe9..3cfceb7e4 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -22,9 +22,7 @@ use serai_client_serai::{ Events, }; -use crate::{ - COSIGN_CONTEXT, Cosign, SeraiRpc, SignedCosign, delay::CosignDelayTask, intend::CosignIntendTask, -}; +use crate::{COSIGN_CONTEXT, Cosign, SeraiRpc, SignedCosign}; use serai_db::MemDb; pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { @@ -173,14 +171,6 @@ impl Test { Self { serai, db: MemDb::new() } } - fn into_intend_task(&self) -> CosignIntendTask { - CosignIntendTask { db: self.db.clone(), serai: self.serai.clone() } - } - - fn into_delay_task(&self) -> CosignDelayTask { - CosignDelayTask { db: self.db.clone() } - } - pub(crate) async fn assert_task_run_and_check_progress( task: &mut impl ContinuallyRan, made_progress: bool, From ce6a48eb1535d36b577254ed4b542574996d3809 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 29 Dec 2025 10:39:23 -0300 Subject: [PATCH 06/71] feat(cosign): more delay improvements & refactor test into traits --- coordinator/cosign/src/delay.rs | 7 +- coordinator/cosign/src/tests/delay.rs | 170 +++++++++++++++++--------- coordinator/cosign/src/tests/mod.rs | 25 +--- 3 files changed, 118 insertions(+), 84 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 476e9cbf6..ee0ea6d3f 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -2,7 +2,7 @@ use core::future::Future; use std::time::{Duration, SystemTime}; use serai_db::*; -use serai_task::ContinuallyRan; +use serai_task::{DoesNotError, ContinuallyRan}; use crate::evaluator::CosignedBlocks; @@ -41,7 +41,8 @@ impl ContinuallyRan for CosignDelayTask { let cosigned_block = CosignedBlocks::try_recv(&mut txn); let Some((block_number, time_evaluated)) = cosigned_block else { - txn.commit(); + // Queue was empty -> nothing to commit + drop(txn); // Stop when no blocks in queue break; }; @@ -71,7 +72,7 @@ impl ContinuallyRan for CosignDelayTask { // If the time valid is greater than the current time, // sleep until the time valid is reached if time_valid > now { - // Sleep until then (no db transaction held during sleep) + // Sleep until then tokio::time::sleep(time_valid.saturating_sub(now)).await; } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index d7c3cc4cf..b381d7265 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -4,42 +4,65 @@ use crate::{ LatestCosignedBlockNumber, delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, - tests::Test, + tests::{IntoTask, Test}, }; +use serai_db::{Db as _, DbTxn as _, MemDb}; +use serai_task::ContinuallyRan; + fn now_secs() -> u64 { now_timestamp().as_secs() } -use serai_db::{Db as _, DbTxn as _}; -use serai_task::ContinuallyRan; +struct DelayTest { + db: MemDb, +} + +impl Default for DelayTest { + fn default() -> Self { + Self { db: MemDb::new() } + } +} + +impl DelayTest { + pub(crate) fn new() -> Self { + Self::default() + } +} -impl Test { - fn into_delay_task(&self) -> impl ContinuallyRan + 'static { +impl IntoTask for DelayTest { + fn into_task(&self) -> impl ContinuallyRan + 'static { CosignDelayTask { db: self.db.clone() } } +} - // Assert CosignedBlocks queue items have been consumed after task run +impl DelayTest { fn assert_queue_is_empty(&self) { - assert!(CosignedBlocks::peek(&self.db).is_none(), "expected queue to be empty"); + assert_eq!(CosignedBlocks::peek(&self.db), None); } fn assert_queue_is_not_empty(&self) { - assert!(CosignedBlocks::peek(&self.db).is_some(), "expected queue to not be empty"); + assert_eq!(CosignedBlocks::peek(&self.db).is_some(), true); } - // Assert LatestCosignedBlockNumber db points to latest block number after task run fn assert_latest_cosigned_block_number_is_expected(&self, block_number: Option) { assert_eq!(LatestCosignedBlockNumber::get(&self.db), block_number); } // Assert everything that changed or should have changed after a task iteration run - fn assert_task_iteration_returns(&self, latest_cosigned_block_number: Option) { + fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: Option) { + // Assert LatestCosignedBlockNumber db points to latest block number after task run self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); + // Assert CosignedBlocks queue items have been consumed after task run + self.assert_queue_is_empty(); + } + + // Assert nothing was added or remains after dbs are expected to be cleared + fn assert_task_iteration_db_is_clear(&self) { + self.assert_latest_cosigned_block_number_is_expected(None); self.assert_queue_is_empty(); } - // Assert everything that changed or should have changed after a task iteration failure fn assert_task_iteration_fails(&self, latest_cosigned_block_number: Option) { self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); self.assert_queue_is_not_empty(); @@ -48,13 +71,15 @@ impl Test { #[tokio::test] async fn delay_task_returns_false_with_no_messages() { - let mut task = Test::new().into_delay_task(); + let test = DelayTest::new(); + let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_task_iteration_db_is_clear(); } #[tokio::test] async fn delay_task_returns_false_with_genesis_block() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -62,34 +87,74 @@ async fn delay_task_returns_false_with_genesis_block() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); // let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); // the already_cosigned block number always defaults to 0, so "genesis" // is always considered cosigned, made_progress returns false Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_task_iteration_db_is_clear(); } #[tokio::test] async fn delay_task_updates_latest_cosigned_block_number() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, now_secs())); - CosignedBlocks::send(&mut txn, &(1u64, now_secs())); - CosignedBlocks::send(&mut txn, &(2u64, now_secs())); + // blocks with the same timestamps + // nothing unusual happens, the task follow block numbers + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(0u64, now)); + CosignedBlocks::send(&mut txn, &(1u64, now)); + CosignedBlocks::send(&mut txn, &(2u64, now)); txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_returns(Some(2u64)); + test.assert_task_iteration_completes_with(Some(2u64)); + + let mut test = DelayTest::new(); + + { + let mut txn = test.db.txn(); + // timestamps out of order + // nothing unusual happens, the task follow block numbers + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(3u64, now)); + CosignedBlocks::send(&mut txn, &(4u64, now - 1)); + CosignedBlocks::send(&mut txn, &(5u64, now - 2)); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_completes_with(Some(5u64)); + + // Test with increasing timestamps (all in the past, so they process immediately) + let mut test = DelayTest::new(); + + { + let mut txn = test.db.txn(); + // timestamps increasing in order + // nothing unusual happens, the task follow block numbers + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(6u64, now)); + CosignedBlocks::send(&mut txn, &(7u64, now + 1)); + CosignedBlocks::send(&mut txn, &(8u64, now + 2)); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_completes_with(Some(8u64)); } #[tokio::test] async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -102,15 +167,15 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; // Queue order: 1, 2, 4, 3 - // Block 1, 2 and 4 processed, block 3 skipped (3 < 4) + // Block 1, 2 and 4 processed, block 3 skipped - // This won't actually happen but it needs to be tested that it does what it is + // This is unlikely to actually happen in practice but it needs to be tested that it does what it is // meant to do, which is that if we've already acknowledged a later block, consume and skip - test.assert_task_iteration_returns(Some(4u64)); + test.assert_task_iteration_completes_with(Some(4u64)); { let mut txn = test.db.txn(); @@ -119,11 +184,12 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { txn.commit(); } - let mut task = test.into_delay_task(); - // No progress following the previously set LatestCosignedBlockNumber was made, + let mut task = test.into_task(); + + // No progress was made since the same block number was skipped, // made_progress returns false Test::assert_task_run_and_check_progress(&mut task, false).await; - test.assert_task_iteration_returns(Some(4u64)); + test.assert_task_iteration_completes_with(Some(4u64)); { let mut txn = test.db.txn(); @@ -134,16 +200,16 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); // Had a duplicate, but made 1 block worth of progress // made_progress returns true Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_returns(Some(5u64)); + test.assert_task_iteration_completes_with(Some(5u64)); } #[tokio::test] async fn delay_task_does_not_ack_before_acknowledgement_delay() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -151,7 +217,7 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); // Give the task a moment to start and reach the sleep @@ -171,12 +237,12 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { assert_eq!(result, true); // Now has a result - test.assert_task_iteration_returns(Some(1u64)); + test.assert_task_iteration_completes_with(Some(1u64)); } #[tokio::test] async fn delay_task_with_zero_timestamp_processes_immediately() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -184,19 +250,19 @@ async fn delay_task_with_zero_timestamp_processes_immediately() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); // This should complete immediately without sleeping // Since 0 as timestamp will always be an older date than the current time as timestamp // and since the ACK time is considered to be passed, there is no sleep time to do Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_returns(Some(1u64)); + test.assert_task_iteration_completes_with(Some(1u64)); } #[tokio::test] async fn delay_task_with_max_timestamp_returns_error() { - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -207,12 +273,13 @@ async fn delay_task_with_max_timestamp_returns_error() { txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow // The task should return an error instead of panicking Test::assert_task_failed(&mut task, "overflow").await; + // since returned an error 3u64 should still be in queue test.assert_task_iteration_fails(Some(1u64)); } @@ -220,7 +287,7 @@ async fn delay_task_with_max_timestamp_returns_error() { async fn delay_task_with_far_future_timestamp_hangs() { // A timestamp far in the future (but not MAX to avoid overflow) // will cause the task to sleep for an extremely long time - let mut test = Test::new(); + let mut test = DelayTest::new(); { let mut txn = test.db.txn(); @@ -233,36 +300,17 @@ async fn delay_task_with_far_future_timestamp_hangs() { CosignedBlocks::send(&mut txn, &(2u64, far_future)); CosignedBlocks::send(&mut txn, &(3u64, 0u64)); + txn.commit(); } - let mut task = test.into_delay_task(); + let mut task = test.into_task(); // Use a timeout to prevent the test from hanging forever let result = tokio::time::timeout(Duration::from_millis(100), task.run_iteration()).await; assert!(result.is_err(), "Expected timeout, but task completed"); + // since had a forced time out 3u64 should still be in queue test.assert_task_iteration_fails(Some(1u64)); } - -#[tokio::test] -async fn delay_task_increasing_blocks_with_decreasing_timestamps() { - let mut test = Test::new(); - let base_time = now_secs(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, base_time + 2)); - CosignedBlocks::send(&mut txn, &(2u64, base_time + 1)); - CosignedBlocks::send(&mut txn, &(3u64, base_time)); - txn.commit(); - } - - let mut task = test.into_delay_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; - - // nothing unusual happens, the task follow block numbers - // timestamps could be out of order - test.assert_task_iteration_returns(Some(3u64)); -} diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 3cfceb7e4..ab23a8db2 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -23,7 +23,6 @@ use serai_client_serai::{ }; use crate::{COSIGN_CONTEXT, Cosign, SeraiRpc, SignedCosign}; -use serai_db::MemDb; pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { // Use a fixed seed to ensure deterministic keypairs across test calls. @@ -150,27 +149,9 @@ impl SeraiRpc for Serai { } } -pub(crate) struct Test { - pub(crate) serai: Serai, - pub(crate) db: MemDb, -} - -impl Default for Test { - fn default() -> Self { - Self { serai: Serai::new(), db: MemDb::new() } - } -} +pub(crate) struct Test; impl Test { - pub(crate) fn new() -> Self { - Self::default() - } - - #[allow(dead_code)] - pub(crate) fn from_serai(serai: Serai) -> Self { - Self { serai, db: MemDb::new() } - } - pub(crate) async fn assert_task_run_and_check_progress( task: &mut impl ContinuallyRan, made_progress: bool, @@ -184,3 +165,7 @@ impl Test { assert!(err_str.contains(error), "{err_str}"); } } + +pub(crate) trait IntoTask { + fn into_task(&self) -> impl ContinuallyRan + 'static; +} From ed3f0728529daa7d417ca4bc8684fe3cd3ebf46e Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 30 Dec 2025 09:32:19 -0300 Subject: [PATCH 07/71] refactor(cosign): more refactors --- coordinator/cosign/src/delay.rs | 10 +- coordinator/cosign/src/tests/delay.rs | 74 +++++++------ coordinator/cosign/src/tests/mod.rs | 149 +------------------------- 3 files changed, 44 insertions(+), 189 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index ee0ea6d3f..0c5ed5bfd 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -2,7 +2,7 @@ use core::future::Future; use std::time::{Duration, SystemTime}; use serai_db::*; -use serai_task::{DoesNotError, ContinuallyRan}; +use serai_task::ContinuallyRan; use crate::evaluator::CosignedBlocks; @@ -37,7 +37,7 @@ impl ContinuallyRan for CosignDelayTask { loop { let mut txn = self.db.txn(); - // Every loop iteration consumes a CosignedBlocks queue message + // Every loop iteration consumes a CosignedBlocks queue message, if successful let cosigned_block = CosignedBlocks::try_recv(&mut txn); let Some((block_number, time_evaluated)) = cosigned_block else { @@ -69,10 +69,10 @@ impl ContinuallyRan for CosignDelayTask { }; let now = now_timestamp(); - // If the time valid is greater than the current time, - // sleep until the time valid is reached + // If the time valid is greater than the current time, sleep until the time valid is reached if time_valid > now { - // Sleep until then + // db txn being held and not committed until sleep completed + // if a timeout occurs, this block will be restarted on the next iteration tokio::time::sleep(time_valid.saturating_sub(now)).await; } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index b381d7265..efa0493ce 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -24,14 +24,10 @@ impl Default for DelayTest { } } -impl DelayTest { - pub(crate) fn new() -> Self { - Self::default() - } -} - impl IntoTask for DelayTest { - fn into_task(&self) -> impl ContinuallyRan + 'static { + type Task = CosignDelayTask; + + fn into_task(&self) -> Self::Task { CosignDelayTask { db: self.db.clone() } } } @@ -50,9 +46,9 @@ impl DelayTest { } // Assert everything that changed or should have changed after a task iteration run - fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: Option) { + fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: u64) { // Assert LatestCosignedBlockNumber db points to latest block number after task run - self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); + self.assert_latest_cosigned_block_number_is_expected(Some(latest_cosigned_block_number)); // Assert CosignedBlocks queue items have been consumed after task run self.assert_queue_is_empty(); } @@ -71,7 +67,7 @@ impl DelayTest { #[tokio::test] async fn delay_task_returns_false_with_no_messages() { - let test = DelayTest::new(); + let test = DelayTest::default(); let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, false).await; test.assert_task_iteration_db_is_clear(); @@ -79,7 +75,7 @@ async fn delay_task_returns_false_with_no_messages() { #[tokio::test] async fn delay_task_returns_false_with_genesis_block() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -98,7 +94,7 @@ async fn delay_task_returns_false_with_genesis_block() { #[tokio::test] async fn delay_task_updates_latest_cosigned_block_number() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -113,15 +109,17 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut task = test.into_task(); + // returns made_progress as true Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(Some(2u64)); + // confirmed the last block as 2 + test.assert_task_iteration_completes_with(2u64); - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); // timestamps out of order - // nothing unusual happens, the task follow block numbers + // nothing unusual happens, the task stil follows block numbers let now = now_secs(); CosignedBlocks::send(&mut txn, &(3u64, now)); CosignedBlocks::send(&mut txn, &(4u64, now - 1)); @@ -131,15 +129,15 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(Some(5u64)); + test.assert_task_iteration_completes_with(5u64); // Test with increasing timestamps (all in the past, so they process immediately) - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); // timestamps increasing in order - // nothing unusual happens, the task follow block numbers + // nothing unusual happens, the task stil follows block numbers let now = now_secs(); CosignedBlocks::send(&mut txn, &(6u64, now)); CosignedBlocks::send(&mut txn, &(7u64, now + 1)); @@ -149,12 +147,12 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(Some(8u64)); + test.assert_task_iteration_completes_with(8u64); } #[tokio::test] async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -164,10 +162,12 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // Sent out of order below CosignedBlocks::send(&mut txn, &(4u64, now_secs())); CosignedBlocks::send(&mut txn, &(3u64, now_secs())); + txn.commit(); } let mut task = test.into_task(); + // returns made_progress as true Test::assert_task_run_and_check_progress(&mut task, true).await; // Queue order: 1, 2, 4, 3 @@ -175,7 +175,7 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // This is unlikely to actually happen in practice but it needs to be tested that it does what it is // meant to do, which is that if we've already acknowledged a later block, consume and skip - test.assert_task_iteration_completes_with(Some(4u64)); + test.assert_task_iteration_completes_with(4u64); { let mut txn = test.db.txn(); @@ -189,7 +189,7 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // No progress was made since the same block number was skipped, // made_progress returns false Test::assert_task_run_and_check_progress(&mut task, false).await; - test.assert_task_iteration_completes_with(Some(4u64)); + test.assert_task_iteration_completes_with(4u64); { let mut txn = test.db.txn(); @@ -204,12 +204,13 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // Had a duplicate, but made 1 block worth of progress // made_progress returns true Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(Some(5u64)); + // confirmed the last block as 5 + test.assert_task_iteration_completes_with(5u64); } #[tokio::test] async fn delay_task_does_not_ack_before_acknowledgement_delay() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -220,29 +221,26 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { let mut task = test.into_task(); let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); - // Give the task a moment to start and reach the sleep - tokio::time::sleep(Duration::from_millis(50)).await; - - // Still nothing is returned + // nothing is returned test.assert_latest_cosigned_block_number_is_expected(None); // Sleep for most of (but not all) the acknowledgement delay tokio::time::sleep(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(1)).await; - // Still nothing is returned + // still nothing is returned test.assert_latest_cosigned_block_number_is_expected(None); - // Wait for the task to actually complete + // wait for the task to actually complete let result = handle.await.unwrap(); assert_eq!(result, true); - // Now has a result - test.assert_task_iteration_completes_with(Some(1u64)); + // Now confirmed the last block as 1 + test.assert_task_iteration_completes_with(1u64); } #[tokio::test] async fn delay_task_with_zero_timestamp_processes_immediately() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -257,12 +255,12 @@ async fn delay_task_with_zero_timestamp_processes_immediately() { // and since the ACK time is considered to be passed, there is no sleep time to do Test::assert_task_run_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(Some(1u64)); + test.assert_task_iteration_completes_with(1u64); } #[tokio::test] async fn delay_task_with_max_timestamp_returns_error() { - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -277,7 +275,7 @@ async fn delay_task_with_max_timestamp_returns_error() { // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow // The task should return an error instead of panicking - Test::assert_task_failed(&mut task, "overflow").await; + Test::assert_task_failed_with(&mut task, "overflow").await; // since returned an error 3u64 should still be in queue test.assert_task_iteration_fails(Some(1u64)); @@ -287,7 +285,7 @@ async fn delay_task_with_max_timestamp_returns_error() { async fn delay_task_with_far_future_timestamp_hangs() { // A timestamp far in the future (but not MAX to avoid overflow) // will cause the task to sleep for an extremely long time - let mut test = DelayTest::new(); + let mut test = DelayTest::default(); { let mut txn = test.db.txn(); @@ -311,6 +309,6 @@ async fn delay_task_with_far_future_timestamp_hangs() { assert!(result.is_err(), "Expected timeout, but task completed"); - // since had a forced time out 3u64 should still be in queue + // since had a forced timeout 3u64 should still be in queue test.assert_task_iteration_fails(Some(1u64)); } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index ab23a8db2..ba3645b7d 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -4,153 +4,9 @@ mod intend; #[cfg(test)] mod delay; -use blake2::{Digest, Blake2b256}; use serai_task::ContinuallyRan; -use core::future::Future; -use std::{ - collections::{HashMap, HashSet}, -}; - -use serai_client_serai::{ - abi::{ - primitives::{ - merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, - BlockHash, - }, - Block, Event, Header, HeaderV1, BLOCK_HEADER_BRANCH_TAG, BLOCK_HEADER_LEAF_TAG, - }, - Events, -}; - -use crate::{COSIGN_CONTEXT, Cosign, SeraiRpc, SignedCosign}; - -pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { - // Use a fixed seed to ensure deterministic keypairs across test calls. - let seed = [42u8; 32]; - let mini = schnorrkel::MiniSecretKey::from_bytes(&seed).expect("fixed seed should be valid"); - mini.expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) -} - -pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { - let sig = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()); - SignedCosign { cosign, signature: sig.to_bytes() } -} - -#[derive(Clone)] -pub(crate) struct Serai { - pub(crate) block_by_number_error: HashMap, - pub(crate) events_error: HashMap, - pub(crate) blocks_by_number: HashMap, - pub(crate) events_by_hash: HashMap, - pub(crate) builds_upon: IncrementalUnbalancedMerkleTree, - pub(crate) missing_blocks: HashSet, -} - -impl Default for Serai { - fn default() -> Self { - Self { - block_by_number_error: HashMap::new(), - events_error: HashMap::new(), - blocks_by_number: HashMap::new(), - events_by_hash: HashMap::new(), - builds_upon: IncrementalUnbalancedMerkleTree::new(), - missing_blocks: HashSet::new(), - } - } -} - -impl Serai { - pub(crate) fn new() -> Self { - Self::default() - } - - pub(crate) fn set_block_not_found(&mut self, block_number: u64) { - self.missing_blocks.insert(block_number); - } - - pub(crate) fn set_block_error(&mut self, block_number: u64, error: &str) { - self.block_by_number_error.insert(block_number, error.to_string()); - } - - pub(crate) fn set_events_error(&mut self, block_hash: BlockHash, error: &str) { - self.events_error.insert(block_hash, error.to_string()); - } - - pub(crate) fn make_block(&mut self, number: u64) -> BlockHash { - let block = Block { - header: Header::V1(HeaderV1 { - number, - builds_upon: self.builds_upon.clone().calculate(BLOCK_HEADER_BRANCH_TAG), - unix_time_in_millis: 0, - transactions_commitment: UnbalancedMerkleTree::EMPTY, - events_commitment: UnbalancedMerkleTree::EMPTY, - consensus_commitment: [0; 32], - }), - transactions: vec![], - }; - - let block_hash = block.header.hash(); - self.builds_upon.append( - BLOCK_HEADER_BRANCH_TAG, - Blake2b256::new_with_prefix([BLOCK_HEADER_LEAF_TAG]) - .chain_update(block_hash.0) - .finalize() - .into(), - ); - - self.blocks_by_number.insert(number, block); - - block_hash - } - - pub(crate) fn initialize_empty_events(&mut self, block_hash: BlockHash) { - self.events_by_hash = HashMap::from([(block_hash, Events::new())]); - } - - pub(crate) fn set_events(&mut self, block_hash: BlockHash, events: Vec) { - self.events_by_hash.insert(block_hash, Events::with(events)); - } -} - -impl SeraiRpc for Serai { - fn latest_finalized_block_number(&self) -> impl Send + Future> { - let latest = self.blocks_by_number.keys().copied().max().unwrap_or(0); - async move { Ok(latest) } - } - - fn block_by_number( - &self, - block: u64, - ) -> impl Send + Future, String>> { - let err = self.block_by_number_error.get(&block).cloned(); - let block_entry = self.blocks_by_number.get(&block).cloned(); - let is_missing = self.missing_blocks.contains(&block); - - async move { - if let Some(e) = err { - return Err(e); - } - if is_missing { - return Ok(None); - } - Ok(block_entry) - } - } - - fn events(&self, block: BlockHash) -> impl Send + Future> { - let err = self.events_error.get(&block).cloned(); - let events = self.events_by_hash.get(&block).cloned().unwrap_or_default(); - async move { - if let Some(e) = err { - return Err(e); - } - Ok(events) - } - } -} pub(crate) struct Test; - impl Test { pub(crate) async fn assert_task_run_and_check_progress( task: &mut impl ContinuallyRan, @@ -159,7 +15,7 @@ impl Test { assert_eq!(task.run_iteration().await.unwrap(), made_progress); } - pub(crate) async fn assert_task_failed(task: &mut impl ContinuallyRan, error: &str) { + pub(crate) async fn assert_task_failed_with(task: &mut impl ContinuallyRan, error: &str) { let err = task.run_iteration().await.unwrap_err(); let err_str = format!("{err:?}"); assert!(err_str.contains(error), "{err_str}"); @@ -167,5 +23,6 @@ impl Test { } pub(crate) trait IntoTask { - fn into_task(&self) -> impl ContinuallyRan + 'static; + type Task: ContinuallyRan + 'static; + fn into_task(&self) -> Self::Task; } From 53d9441bc184e6e2d151682eeb16ee6ac0c1b6a0 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 30 Dec 2025 16:03:57 -0300 Subject: [PATCH 08/71] refactor(cosign): minor refactors --- coordinator/cosign/src/delay.rs | 12 ++++++------ coordinator/cosign/src/tests/delay.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 0c5ed5bfd..ffa829bce 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -61,12 +61,12 @@ impl ContinuallyRan for CosignDelayTask { // Calculate when we should mark it as valid, checking for overflow to avoid panic let time_evaluated_duration = Duration::from_secs(time_evaluated); - let Some(time_valid) = time_evaluated_duration.checked_add(ACKNOWLEDGEMENT_DELAY) else { - txn.commit(); - return Err(format!( - "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY" - )); - }; + let time_valid = + time_evaluated_duration.checked_add(ACKNOWLEDGEMENT_DELAY).ok_or_else(|| { + format!( + "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY", + ) + })?; let now = now_timestamp(); // If the time valid is greater than the current time, sleep until the time valid is reached diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index efa0493ce..feac6b701 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -275,7 +275,7 @@ async fn delay_task_with_max_timestamp_returns_error() { // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow // The task should return an error instead of panicking - Test::assert_task_failed_with(&mut task, "overflow").await; + Test::assert_task_run_and_failed_with(&mut task, "overflow").await; // since returned an error 3u64 should still be in queue test.assert_task_iteration_fails(Some(1u64)); From 3d8e153f2f628b27ce51fdf689622dbf0532ee80 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 30 Dec 2025 17:34:11 -0300 Subject: [PATCH 09/71] feat(cosign): intend changes, add evaluator --- coordinator/cosign/src/evaluator.rs | 101 ++- coordinator/cosign/src/intend.rs | 110 +-- coordinator/cosign/src/tests/evaluator.rs | 242 +++++++ coordinator/cosign/src/tests/intend.rs | 832 ++++++++++++---------- coordinator/cosign/src/tests/mod.rs | 5 +- 5 files changed, 829 insertions(+), 461 deletions(-) create mode 100644 coordinator/cosign/src/tests/evaluator.rs diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 41642e5c4..b61e805b2 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -9,6 +9,9 @@ use crate::{ intend::{GlobalSessionsChannel, BlockEventData, BlockEvents}, }; +pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60); +const COSIGN_COMMIT_THRESHOLD: u64 = 83; + create_db!( SubstrateCosignEvaluator { // The global session currently being evaluated. @@ -35,13 +38,16 @@ db_channel!( fn currently_evaluated_global_session_strict( txn: &mut impl DbTxn, block_number: u64, -) -> ([u8; 32], GlobalSession) { +) -> Result<([u8; 32], GlobalSession), String> { let mut res = { let existing = match CurrentlyEvaluatedGlobalSession::get(txn) { Some(existing) => existing, None => { - let first = GlobalSessionsChannel::try_recv(txn) - .expect("fetching latest global session yet none declared"); + let first = GlobalSessionsChannel::try_recv(txn).ok_or_else(|| { + format!( + "fetching global session for block #{block_number} but none declared in channel yet" + ) + })?; CurrentlyEvaluatedGlobalSession::set(txn, &first); first } @@ -66,13 +72,28 @@ fn currently_evaluated_global_session_strict( } } - res + Ok(res) } pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> { CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id) } +fn should_request_cosigns(last_request_for_cosigns: &mut Instant) -> bool { + if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) { + return false; + } + + *last_request_for_cosigns = Instant::now(); + + true +} + +// Calculate the minimum threshold required for cosigning +fn cosign_threshold(total_stake: u64) -> u64 { + ((total_stake * COSIGN_COMMIT_THRESHOLD) / 100) + 1 +} + /// A task to determine if a block has been cosigned and we should handle it. pub(crate) struct CosignEvaluatorTask { pub(crate) db: D, @@ -84,49 +105,53 @@ impl ContinuallyRan for CosignEvaluatorTask impl Send + Future> { - let should_request_cosigns = |last_request_for_cosigns: &mut Instant| { - const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60); - if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) { - return false; - } - *last_request_for_cosigns = Instant::now(); - true - }; - async move { let mut known_cosign = None; let mut made_progress = false; loop { let mut txn = self.db.txn(); - let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) - else { + let events = BlockEvents::try_recv(&mut txn); + + let Some(BlockEventData { block_number, has_events }) = events else { + // no block_events from BlockEvents channel, nothing to commit + drop(txn); break; }; + if block_number == 0 { + // clear BlockEvents queue and continue to next + txn.commit(); + continue; + } + // Fetch the global session information let (global_session, global_session_info) = - currently_evaluated_global_session_strict(&mut txn, block_number); + currently_evaluated_global_session_strict(&mut txn, block_number)?; match has_events { // Because this had notable events, we require an explicit cosign for this block by a // supermajority of the prior block's validator sets HasEvents::Notable => { - let mut weight_cosigned = 0; + let mut weight_cosigned = 0u64; + for set in global_session_info.sets { // Check if we have the cosign from this set if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) - .map(|signed_cosign| signed_cosign.cosign.block_number) == - Some(block_number) + .map(|signed_cosign| signed_cosign.cosign.block_number) + == Some(block_number) { - // Since have this cosign, add the set's weight to the weight which has cosigned - weight_cosigned += - global_session_info.stakes.get(&set.network).ok_or_else(|| { - "ValidatorSet in global session yet didn't have its stake".to_owned() - })?; + // Since we have this cosign, add the set's weight to the weight which has cosigned + let stake = global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_owned() + })?; + + weight_cosigned = weight_cosigned + .checked_add(*stake) + .ok_or_else(|| "weight_cosigned overflow".to_owned())?; } } // Check if the sum weight doesn't cross the required threshold - if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) { + if weight_cosigned < cosign_threshold(global_session_info.total_stake) { // Request the necessary cosigns over the network if should_request_cosigns(&mut self.last_request_for_cosigns) { self @@ -135,12 +160,16 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask = None; + for set in global_session_info.sets { // Check if this set cosigned this block or not let Some(cosign) = @@ -172,11 +202,14 @@ impl ContinuallyRan for CosignEvaluatorTask= block_number { - weight_cosigned += - global_session_info.stakes.get(&set.network).ok_or_else(|| { - "ValidatorSet in global session yet didn't have its stake".to_owned() - })?; + let stake = global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_owned() + })?; + weight_cosigned = weight_cosigned + .checked_add(*stake) + .ok_or_else(|| "weight_cosigned overflow".to_owned())?; } // Update the lowest block common to all of these cosigns @@ -186,7 +219,7 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask Result<(Block, Events, HasEvents), String> { - let block = match serai.block_by_number(block_number).await { - Ok(Some(block)) => block, - Ok(None) => return Err("couldn't get block which should've been finalized".to_owned()), - Err(e) => return Err(format!("RPC error fetching block #{block_number}: {e}")), - }; - let events = match serai.events(block.header.hash()).await { - Ok(events) => events, - Err(e) => return Err(format!("RPC error fetching events for block #{block_number}: {e}")), - }; - - if events.validator_sets().set_keys_events().next().is_some() { - return Ok((block, events, HasEvents::Notable)); - } - - if events.coins().burn_with_instruction_events().next().is_some() { - return Ok((block, events, HasEvents::NonNotable)); - } - - Ok((block, events, HasEvents::No)) -} - // Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this // block. fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> { @@ -106,10 +80,11 @@ impl ContinuallyRan for CosignIntendTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = match self.serai.latest_finalized_block_number().await { - Ok(n) => n, - Err(e) => return Err(format!("RPC error fetching latest finalized block number: {e}")), - }; + let latest_block_number = self + .serai + .latest_finalized_block_number() + .await + .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; if latest_block_number < start_block_number { return Ok(false); @@ -118,8 +93,20 @@ impl ContinuallyRan for CosignIntendTask { for block_number in start_block_number..=latest_block_number { let mut txn = self.db.txn(); - let (block, events, mut has_events) = - block_has_events_justifying_a_cosign(&self.serai, block_number).await?; + let block = self + .serai + .block_by_number(block_number) + .await + .map_err(|e| format!("RPC error fetching block #{block_number}: {e}"))? + .ok_or_else(|| "couldn't get block which should've been finalized".to_owned())?; + + let events = self + .serai + .events(block.header.hash()) + .await + .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; + + let mut has_events = HasEvents::No; let mut builds_upon = BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new()); @@ -154,17 +141,27 @@ impl ContinuallyRan for CosignIntendTask { validator_sets::Event::Allocation { validator, network, amount } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0)); + let new_stake = existing.0.checked_add(amount.0).ok_or_else(|| { + format!( + "stake overflow for validator {:?} on network {:?}: {} + {}", + validator, network, existing.0, amount.0 + ) + })?; + Stakes::set(&mut txn, network, *validator, &Amount(new_stake)); } validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set( - &mut txn, - network, - *validator, - &Amount(existing.0.saturating_sub(amount.0)), - ); + + let existing_stake = Stakes::get(&txn, network, *validator) + .ok_or_else(|| format!("unable to deallocate with no prior existing stake"))?; + + let new_stake = existing_stake.0.checked_sub(amount.0).ok_or_else(|| { + format!( + "stake underflow for validator {:?} on network {:?}: {} - {}", + validator, network, existing_stake.0, amount.0 + ) + })?; + Stakes::set(&mut txn, network, *validator, &Amount(new_stake)); } validator_sets::Event::SetDecided { set, validators } => { let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; @@ -175,10 +172,13 @@ impl ContinuallyRan for CosignIntendTask { ); } validator_sets::Event::SetKeys { set, key_pair } => { + has_events = HasEvents::Notable; + + let validators = Validators::take(&mut txn, *set) + .ok_or_else(|| "set which wasn't decided set keys".to_string())?; + let mut stake = 0; - for validator in - Validators::take(&mut txn, *set).expect("set which wasn't decided set keys") - { + for validator in validators { stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0; } LatestSet::set( @@ -189,6 +189,12 @@ impl ContinuallyRan for CosignIntendTask { } _ => continue, }, + Event::Coins(event) => match event { + coins::Event::BurnWithInstruction { .. } => { + has_events = HasEvents::NonNotable; + } + _ => continue, + }, _ => continue, } } @@ -212,7 +218,9 @@ impl ContinuallyRan for CosignIntendTask { sets.push(set); keys.insert(set.network, key); stakes.insert(set.network, stake.0); - total_stake = total_stake.saturating_add(stake.0); + total_stake = total_stake + .checked_add(stake.0) + .ok_or_else(|| format!("total stake overflow: {} + {}", total_stake, stake.0))?; } if total_stake == 0 { // commit only per block finished otherwise reset progress @@ -248,13 +256,19 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::Notable | HasEvents::NonNotable => { let global_session_for_this_block = global_session_for_this_block .expect("global session for this block was None but still attempting to cosign it"); - let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block) - .expect("last global session intended wasn't saved to the database"); + let global_session_info = + GlobalSessions::get(&txn, global_session_for_this_block).ok_or_else(|| { + format!( + "global session {:?} intended for block #{block_number} wasn't saved to the database", + global_session_for_this_block + ) + })?; // Tell each set of their expectation to cosign this block for set in global_session_info.sets { #[cfg(not(coverage))] log::debug!("{set:?} will be cosigning block #{block_number}"); + IntendedCosigns::send( &mut txn, set, @@ -276,6 +290,8 @@ impl ContinuallyRan for CosignIntendTask { ScanCosignFrom::set(&mut txn, &(block_number + 1)); // All-or-nothing, commit only per block finished otherwise reset progress + // avoids partially adding db entries without committing the full expected db additions + // i.e. saving a SubstrateBlockHash initially but later failing mid-way txn.commit(); } diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs new file mode 100644 index 000000000..bd0ea8ba0 --- /dev/null +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -0,0 +1,242 @@ +use std::{ + collections::HashMap, + sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }, + time::{Duration, Instant}, +}; + +use serai_db::{DbTxn, Db as _, MemDb}; +use serai_client_serai::abi::primitives::{ + crypto::Public, + validator_sets::{ExternalValidatorSet, Session}, +}; + +use crate::{ + BlockHash, Cosign, ExternalNetworkId, GlobalSession, HasEvents, NetworksLatestCosignedBlock, + SignedCosign, + evaluator::{ + CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, + }, + intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, + tests::{IntoTask, Test}, +}; +use crate::RequestNotableCosigns; + +use serai_task::{ContinuallyRan}; + +#[derive(Clone)] +pub(crate) struct TestRequest { + pub(crate) calls: Arc, + pub(crate) should_error: bool, +} + +pub(crate) struct EvaluatorTest { + pub(crate) db: MemDb, +} + +impl Default for EvaluatorTest { + fn default() -> Self { + Self { db: MemDb::new() } + } +} + +impl IntoTask for EvaluatorTest { + type Task = CosignEvaluatorTask; + + fn into_task(&self) -> Self::Task { + let (request, _calls) = TestRequest::new(false); + CosignEvaluatorTask { db: self.db.clone(), request, last_request_for_cosigns: Instant::now() } + } +} + +#[derive(Debug)] +pub(crate) struct RequestError; + +impl TestRequest { + pub(crate) fn new(should_error: bool) -> (Self, Arc) { + let calls = Arc::new(AtomicUsize::new(0)); + (Self { calls: calls.clone(), should_error }, calls) + } +} + +impl RequestNotableCosigns for TestRequest { + type Error = RequestError; + + fn request_notable_cosigns( + &self, + _global_session: [u8; 32], + ) -> impl Send + core::future::Future> { + let calls = self.calls.clone(); + let should_error = self.should_error; + async move { + calls.fetch_add(1, Ordering::SeqCst); + if should_error { + Err(RequestError) + } else { + Ok(()) + } + } + } +} + +impl EvaluatorTest { + fn assert_no_currently_evaluated_global_session(&self) { + assert_eq!(CurrentlyEvaluatedGlobalSession::get(&self.db).is_none(), true); + } + + /// Asserts that cosigned blocks from start_block to end_block (inclusive) are present in order. + fn assert_cosigned_blocks_range(&mut self, start_block: u64, end_block: u64) { + let mut txn = self.db.txn(); + for expected_block in start_block..=end_block { + let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) + .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); + assert_eq!(block_number, expected_block, "cosigned block mismatch"); + } + assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "unexpected extra cosigned block"); + txn.commit(); + } + + fn assert_no_cosigned_blocks(&self) { + assert_eq!(CosignedBlocks::peek(&self.db).is_none(), true); + } + + fn assert_no_global_sessions_channel(&self) { + assert_eq!(GlobalSessionsChannel::peek(&self.db).is_none(), true); + } + + fn assert_has_global_sessions_channel(&self) { + assert_eq!(GlobalSessionsChannel::peek(&self.db).is_some(), true); + } + + fn assert_no_block_events(&self) { + assert_eq!(BlockEvents::peek(&self.db).is_none(), true); + } + + fn assert_has_block_events(&self) { + assert_eq!(BlockEvents::peek(&self.db).is_some(), true); + } + + /// Asserts that all evaluator DB entries are cleared (return None or are empty). + /// This is useful for verifying initial state or that cleanup worked correctly. + fn assert_evaluator_db_is_clear(&self) { + self.assert_no_currently_evaluated_global_session(); + self.assert_no_cosigned_blocks(); + self.assert_no_global_sessions_channel(); + self.assert_no_block_events(); + } + + fn assert_task_iteration_completed(&mut self, start_block: u64, end_block: u64) { + self.assert_no_global_sessions_channel(); + self.assert_no_block_events(); + self.assert_cosigned_blocks_range(start_block, end_block); + } + + fn assert_task_iteration_failed_at(&mut self, block_number: u64) { + self.assert_no_global_sessions_channel(); + self.assert_has_block_events(); + self.assert_cosigned_blocks_range(block_number - 1, block_number - 1); + } + + const GLOBAL_SESSION: [u8; 32] = [1u8; 32]; + + /// Initializes a global session with the hardcoded test ID and the given start block number. + /// Returns the global session ID for use in tests that need it. + fn init_global_session(&mut self, start_block_number: u64) -> [u8; 32] { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + + let info = + GlobalSession { start_block_number, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = self.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(Self::GLOBAL_SESSION, info)); + txn.commit(); + + Self::GLOBAL_SESSION + } +} + +#[tokio::test] +async fn evaluator_task_returns_false_with_no_block_events() { + let test = EvaluatorTest::default(); + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_evaluator_db_is_clear(); +} + +#[tokio::test] +async fn evaluator_task_returns_false_with_genesis_block() { + let mut test = EvaluatorTest::default(); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, false).await; +} + +#[tokio::test] +async fn evaluator_task_processes_blocks_with_no_events() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_completed(1, 2); +} + +#[tokio::test] +async fn evaluator_task_errors_on_notable_events_without_cosign() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + test.assert_task_iteration_failed_at(2); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task: CosignEvaluatorTask = test.into_task().into(); + task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); + + Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + // test.assert_task_iteration_failed_at(2); +} diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 57de29738..78c442f58 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -1,23 +1,29 @@ use core::future::Future; -use std::collections::HashMap; +use std::{ + collections::{HashMap, HashSet}, + time::{SystemTime, UNIX_EPOCH}, +}; -use serai_db::{Db as _, DbTxn}; +use blake2::{Blake2b256, Digest}; +use serai_db::{Db as _, DbTxn, MemDb}; use serai_client_serai::{ + Events, abi::{ + Block, Event, Header, HeaderV1, BLOCK_HEADER_BRANCH_TAG, BLOCK_HEADER_LEAF_TAG, coins, primitives::{ - address::{SeraiAddress, ExternalAddress}, - balance::{Amount, ExternalBalance}, - coin::ExternalCoin, - crypto::{Public, ExternalKey, KeyPair}, + BlockHash, + address::{ExternalAddress, SeraiAddress}, + balance::{Amount, Balance, ExternalBalance}, + coin::{Coin, ExternalCoin}, + crypto::{ExternalKey, KeyPair, Public}, instructions::{OutInstruction, OutInstructionWithBalance}, + merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, network_id::{ExternalNetworkId, NetworkId}, - validator_sets::{Session, ValidatorSet, ExternalValidatorSet, KeyShares}, - BlockHash, + validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, }, - coins, system, validator_sets, Block, Event, + system, validator_sets, }, - Events, }; use crate::{ @@ -25,19 +31,16 @@ use crate::{ BlockEventData, BlockEvents, BuildsUpon, CosignIntendTask, GlobalSessionsChannel, IntendedCosigns, LatestSet, ScanCosignFrom, Set, Stakes, Validators, }, - tests::*, + tests::{IntoTask, Test}, CosignIntent, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, HasEvents, LatestGlobalSessionIntended, SeraiRpc, SubstrateBlockHash, }; -fn set_keys_event_with_pair(set: ExternalValidatorSet, key_pair: &KeyPair) -> Event { - Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair: key_pair.clone() }) -} - -fn set_keys_event(set: ExternalValidatorSet, key_seed: u8) -> Event { - let key_pair = - KeyPair(Public([key_seed; 32]), ExternalKey(vec![key_seed; 32].try_into().unwrap())); - set_keys_event_with_pair(set, &key_pair) +fn set_keys_event(set: ExternalValidatorSet) -> Event { + Event::ValidatorSets(validator_sets::Event::SetKeys { + set, + key_pair: KeyPair(Public([0x01; 32]), ExternalKey(vec![0x02; 32].try_into().unwrap())), + }) } fn set_decided_event(set: ValidatorSet, validator: SeraiAddress) -> Event { @@ -65,11 +68,12 @@ fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) } fn burn_with_instruction_event(from: SeraiAddress) -> Event { - let address = ExternalAddress::try_from(vec![1u8, 2u8, 3u8]).unwrap(); Event::Coins(coins::Event::BurnWithInstruction { from, instruction: OutInstructionWithBalance { - instruction: OutInstruction::Transfer(address), + instruction: OutInstruction::Transfer( + ExternalAddress::try_from(vec![1u8, 2u8, 3u8]).unwrap(), + ), balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1) }, }, }) @@ -84,17 +88,176 @@ fn events_from_allocations(allocations: &[(SeraiAddress, ExternalNetworkId, u64) .collect() } -impl Test { +#[derive(Clone)] +pub(crate) struct Serai { + pub(crate) latest_finalized_error: Option, + pub(crate) block_by_number_error: HashMap, + pub(crate) events_error: HashMap, + pub(crate) blocks_by_number: HashMap, + pub(crate) events_by_hash: HashMap, + pub(crate) builds_upon: IncrementalUnbalancedMerkleTree, + pub(crate) missing_blocks: HashSet, +} + +impl Default for Serai { + fn default() -> Self { + Self { + latest_finalized_error: None, + block_by_number_error: HashMap::new(), + events_error: HashMap::new(), + blocks_by_number: HashMap::new(), + events_by_hash: HashMap::new(), + builds_upon: IncrementalUnbalancedMerkleTree::new(), + missing_blocks: HashSet::new(), + } + } +} + +impl Serai { + pub(crate) fn new() -> Self { + Self::default() + } + + pub(crate) fn set_latest_finalized_error(&mut self, error: &str) { + self.latest_finalized_error = Some(error.to_string()); + } + + pub(crate) fn set_block_not_found(&mut self, block_number: u64) { + self.missing_blocks.insert(block_number); + } + + pub(crate) fn set_block_error(&mut self, block_number: u64, error: &str) { + self.block_by_number_error.insert(block_number, error.to_string()); + } + + pub(crate) fn set_events_error(&mut self, block_hash: BlockHash, error: &str) { + self.events_error.insert(block_hash, error.to_string()); + } + + pub(crate) fn make_block(&mut self, number: u64) -> BlockHash { + let block = Block { + header: Header::V1(HeaderV1 { + number, + builds_upon: self.builds_upon.clone().calculate(BLOCK_HEADER_BRANCH_TAG), + unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() + as u64, + transactions_commitment: UnbalancedMerkleTree::EMPTY, + events_commitment: UnbalancedMerkleTree::EMPTY, + consensus_commitment: [0; 32], + }), + transactions: vec![], + }; + + let block_hash = block.header.hash(); + + self.builds_upon.append( + BLOCK_HEADER_BRANCH_TAG, + Blake2b256::new_with_prefix([BLOCK_HEADER_LEAF_TAG]) + .chain_update(block_hash.0) + .finalize() + .into(), + ); + + self.blocks_by_number.insert(number, block); + + block_hash + } + + pub(crate) fn set_events(&mut self, block_hash: BlockHash, events: Vec) { + self.events_by_hash.insert(block_hash, Events::with(events)); + } +} + +impl SeraiRpc for Serai { + fn latest_finalized_block_number(&self) -> impl Send + Future> { + let err = self.latest_finalized_error.clone(); + let latest = self.blocks_by_number.keys().copied().max().unwrap_or(0); + async move { + if let Some(e) = err { + return Err(e); + } + Ok(latest) + } + } + + fn block_by_number( + &self, + block: u64, + ) -> impl Send + Future, String>> { + let err = self.block_by_number_error.get(&block).cloned(); + let block_entry = self.blocks_by_number.get(&block).cloned(); + let is_missing = self.missing_blocks.contains(&block); + + async move { + if let Some(e) = err { + return Err(e); + } + if is_missing { + return Ok(None); + } + Ok(block_entry) + } + } + + fn events(&self, block: BlockHash) -> impl Send + Future> { + let err = self.events_error.get(&block).cloned(); + let events = self.events_by_hash.get(&block).cloned().unwrap_or_default(); + async move { + if let Some(e) = err { + return Err(e); + } + Ok(events) + } + } +} + +pub(crate) struct IntendTest { + pub(crate) serai: Serai, + pub(crate) db: MemDb, +} + +impl Default for IntendTest { + fn default() -> Self { + Self { serai: Serai::new(), db: MemDb::new() } + } +} + +impl IntoTask for IntendTest { + type Task = CosignIntendTask; + + fn into_task(&self) -> Self::Task { + CosignIntendTask { db: self.db.clone(), serai: self.serai.clone() } + } +} + +impl IntendTest { fn assert_substrate_block_hash_exists(&self, block_number: u64) -> BlockHash { let block_hash = SubstrateBlockHash::get(&self.db, block_number); assert!(block_hash.is_some(), "no substrate blockhash for block {block_number}"); block_hash.expect("no substrate blockhash") } + fn assert_no_global_sessions_channel(&self) { + assert_eq!(GlobalSessionsChannel::peek(&self.db).is_none(), true); + } + + fn assert_no_block_events(&self) { + assert_eq!(BlockEvents::peek(&self.db).is_none(), true); + } + + fn assert_no_substrate_block_hash(&self, block_number: u64) { + let block_hash = SubstrateBlockHash::get(&self.db, block_number); + assert!(block_hash.is_none(), "expected no substrate blockhash for block {block_number}"); + } + fn assert_builds_upon_is_expected(&self, expected: &IncrementalUnbalancedMerkleTree) { assert_eq!(BuildsUpon::get(&self.db).as_ref(), Some(expected)); } + fn assert_no_builds_upon(&self) { + assert_eq!(BuildsUpon::get(&self.db), None); + } + // Assert everything that changed or should have changed after a simple task iteration run with linear blocks // (substrate block hashes are set and builds upon is expected) fn assert_task_iteration_per_block(&self, block_number: u64) -> BlockHash { @@ -103,6 +266,11 @@ impl Test { block_hash } + fn assert_task_iteration_per_block_clears(&self, block_number: u64) { + self.assert_no_substrate_block_hash(block_number); + self.assert_no_builds_upon(); + } + fn assert_block_events_is_expected(&mut self, expected: BlockEventData) { let mut txn = self.db.txn(); let actual = BlockEvents::try_recv(&mut txn); @@ -120,6 +288,10 @@ impl Test { assert_eq!(ScanCosignFrom::get(&self.db), Some(expected)); } + fn assert_no_scan_cosign_from(&self) { + assert_eq!(ScanCosignFrom::get(&self.db), None); + } + // Assert everything that changed or should have changed after task iteration is ran per block // (BlockEventData points to current block and events, ScanCosignFrom is the next block) fn assert_task_iteration_per_block_concluded( @@ -136,10 +308,7 @@ impl Test { self.assert_task_iteration_per_block_concluded(block_number, HasEvents::No); } - fn assert_task_iterations_with_no_events_ran(&mut self, block_numbers: (u64, u64)) { - let start_block = block_numbers.0; - let end_block = block_numbers.1; - + fn assert_task_iterations_with_no_events_ran(&mut self, start_block: u64, end_block: u64) { for block_number in start_block..=end_block { self.assert_task_iteration_per_block(block_number); self.assert_block_events_is_expected(BlockEventData { @@ -151,23 +320,19 @@ impl Test { self.assert_scan_cosign_from_is_expected(end_block + 1); } - /// Asserts that block 1 was processed successfully but block 2 failed. - /// Takes the expected `builds_upon` value (state after block 1 was processed). - fn assert_block_1_succeeded_block_2_failed( - &self, + /// Asserts that blocks were processed successfully up to (but not including) failed_block. + /// Takes the expected `builds_upon` value (state after the last successful block was processed). + fn assert_task_iterations_with_no_events_failed_at( + &mut self, + failed_block: u64, expected_builds_upon: &IncrementalUnbalancedMerkleTree, ) { - let getter = &self.db; - - assert!(SubstrateBlockHash::get(getter, 1).is_some()); - assert!(SubstrateBlockHash::get(getter, 2).is_none()); + let prev_builds_upon = self.serai.builds_upon.clone(); - // BuildsUpon should reflect state after block 1 (before block 2 failed) - self.assert_builds_upon_is_expected(expected_builds_upon); + self.serai.builds_upon = expected_builds_upon.clone(); + self.assert_task_iteration_per_block_with_no_events_ran(failed_block - 1); - assert_eq!(BlockEvents::peek(getter).expect("missing block events").block_number, 1); - // Next ScanCosignFrom is still block 2 since it failed and must be re-ran - assert_eq!(ScanCosignFrom::get(getter).expect("missing scan cosign from"), 2); + self.serai.builds_upon = prev_builds_upon; } fn assert_stakes_is_expected( @@ -179,6 +344,10 @@ impl Test { assert_eq!(Stakes::get(&self.db, network, validator), expected); } + fn assert_no_stakes(&self, network: ExternalNetworkId, validator: SeraiAddress) { + assert_eq!(Stakes::get(&self.db, network, validator), None); + } + /// Asserts stakes match the accumulated totals from a slice of allocations. /// Groups by (network, validator) and sums amounts before asserting. fn assert_stakes_from_allocations_is_expected( @@ -210,6 +379,10 @@ impl Test { assert_eq!(Validators::get(&self.db, set), expected); } + fn assert_no_validators(&self, set: ExternalValidatorSet) { + assert_eq!(Validators::get(&self.db, set), None); + } + fn assert_latest_set_is_expected(&self, network: ExternalNetworkId, expected: Option<&Set>) { let actual = LatestSet::get(&self.db, network); match (actual.as_ref(), expected) { @@ -223,7 +396,10 @@ impl Test { } } - #[allow(dead_code)] + fn assert_no_latest_set(&self, network: ExternalNetworkId) { + assert_eq!(LatestSet::get(&self.db, network).is_none(), true); + } + fn assert_global_sessions_get(&self, session_id: [u8; 32], expected: Option<&GlobalSession>) { match (GlobalSessions::get(&self.db, session_id), expected) { (Some(ref actual), Some(exp)) => Self::assert_global_session(actual, exp), @@ -238,12 +414,14 @@ impl Test { assert_eq!(GlobalSessionsLastBlock::get(&self.db, session_id), Some(expected)); } - #[allow(dead_code)] fn assert_latest_global_session_intended(&self, expected: Option<[u8; 32]>) { assert_eq!(LatestGlobalSessionIntended::get(&self.db), expected); } - #[allow(dead_code)] + fn assert_no_latest_global_session_intended(&self) { + assert_eq!(LatestGlobalSessionIntended::get(&self.db), None); + } + fn assert_global_sessions_channel_peek(&self, expected: Option<&([u8; 32], GlobalSession)>) { let actual = GlobalSessionsChannel::peek(&self.db); match (actual.as_ref(), expected) { @@ -264,18 +442,71 @@ impl Test { assert_eq!(IntendedCosigns::peek(&self.db, set), Some(expected)); } - /// Asserts that a notable block was processed correctly, verifying: - /// - Substrate block hash and builds_upon are set - /// - BlockEvents has the correct event type: - /// - HasEvents::No for the first notable block (no prior session to cosign it) - /// - HasEvents::Notable for subsequent notable blocks - /// - A new GlobalSession was created and stored - /// - GlobalSessionsLastBlock is set for the previous session (if one existed) - /// - LatestGlobalSessionIntended is updated to the new session - /// - GlobalSessionsChannel received the new session - /// - IntendedCosigns are sent for the previous session's sets (if one existed) - /// - /// Returns the new session ID and session info for further assertions if needed. + fn assert_no_intended_cosigns(&self, set: ExternalValidatorSet) { + assert_eq!(IntendedCosigns::peek(&self.db, set).is_none(), true); + } + + /// Asserts that all DB entries are cleared (return None or are empty). + /// This is useful for verifying initial state or that cleanup worked correctly. + fn assert_db_cleared( + &self, + block_numbers: &[u64], + networks: &[ExternalNetworkId], + sets: &[ExternalValidatorSet], + session_ids: &[[u8; 32]], + stakes: &[(ExternalNetworkId, SeraiAddress)], + ) { + self.assert_global_db_is_clear(); + + for &block_number in block_numbers { + self.assert_no_substrate_block_hash(block_number); + } + + for &network in networks { + self.assert_no_latest_set(network); + } + + for &set in sets { + self.assert_no_validators(set); + self.assert_no_intended_cosigns(set); + } + + for &session_id in session_ids { + self.assert_global_sessions_get(session_id, None); + self.assert_no_global_sessions_last_block(session_id); + } + + for &(network, validator) in stakes { + self.assert_no_stakes(network, validator); + } + } + + /// Asserts that all global (parameterless) DB entries are cleared. + /// Use this for a quick check when you don't need to verify parameterized entries. + fn assert_global_db_is_clear(&self) { + // create_db! { Cosign {... + self.assert_no_latest_global_session_intended(); + + // create_db!( CosignIntend {... + self.assert_no_scan_cosign_from(); + self.assert_no_builds_upon(); + + // db_channel! { CosignIntendChannels {... + self.assert_no_global_sessions_channel(); + self.assert_no_block_events(); + } + + fn assert_global_db_is_clear_after_block(&self, block_number: u64) { + self.assert_global_db_is_clear(); + + // create_db! { Cosign {... + self.assert_no_substrate_block_hash(block_number); + } + + fn assert_no_global_sessions_last_block(&self, session_id: [u8; 32]) { + assert_eq!(GlobalSessionsLastBlock::get(&self.db, session_id), None); + } + fn assert_task_iteration_per_block_with_notable_events_ran( &mut self, block_number: u64, @@ -292,7 +523,6 @@ impl Test { has_events: expected_has_events, }); - // Get session from channel (channels preserve order, so this gives us the session for this block) let mut txn = self.db.txn(); let channel_entry = GlobalSessionsChannel::try_recv(&mut txn); txn.commit(); @@ -333,11 +563,6 @@ impl Test { (session_id, session) } - /// Asserts that a non-notable block (e.g., with burn events) was processed correctly, verifying: - /// - Substrate block hash and builds_upon are set - /// - BlockEvents has NonNotable for this block - /// - IntendedCosigns are sent for the active session's sets (with notable=false) - /// - ScanCosignFrom is set to the next block fn assert_task_iteration_per_block_with_non_notable_events_ran(&mut self, block_number: u64) { let block_hash = self.assert_task_iteration_per_block(block_number); self.assert_task_iteration_per_block_concluded(block_number, HasEvents::NonNotable); @@ -345,7 +570,6 @@ impl Test { let active_session_id = LatestGlobalSessionIntended::get(&self.db) .expect("NonNotable block requires an active session from a prior notable block"); - // IntendedCosigns are sent for the active session's sets with notable=false let session = GlobalSessions::get(&self.db, active_session_id).expect("active session should exist"); for set in session.sets { @@ -364,66 +588,46 @@ impl Test { #[tokio::test] async fn intend_returns_false_with_no_blocks() { - let test = Test::new(); - let mut task = test.into_intend_task(); + let test = IntendTest::default(); + let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_global_db_is_clear(); } #[tokio::test] async fn intend_returns_false_with_genesis_block() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - let genesis_hash = test.serai.make_block(0); - test.serai.initialize_empty_events(genesis_hash); + test.serai.make_block(0); - let mut task = test.into_intend_task(); + let mut task = test.into_task(); // In intend.rs let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - // will default to the 1st block, and without a greater serai.latest_finalized_block_number() + // will always default to the 1st block, and without a greater serai.latest_finalized_block_number() // there will nothing to iterate, returning false as in "did not progress" Test::assert_task_run_and_check_progress(&mut task, false).await; + test.assert_global_db_is_clear_after_block(0u64); } #[tokio::test] -async fn intend_returns_true_with_one_block() { - let mut test = Test::new(); - - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); - - let mut task = test.into_intend_task(); - - // Should return true as in "did progress the new block" - Test::assert_task_run_and_check_progress(&mut task, true).await; - - test.assert_task_iteration_per_block_with_no_events_ran(1); -} - -#[tokio::test] -async fn intend_returns_true_with_linear_blocks_with_no_events() { - let mut test = Test::new(); - - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); +async fn intend_returns_true_with_linear_blocks() { + let mut test = IntendTest::default(); - let block2_hash = test.serai.make_block(2); - test.serai.initialize_empty_events(block2_hash); + test.serai.make_block(1); + test.serai.make_block(2); + test.serai.make_block(3); - let block3_hash = test.serai.make_block(3); - test.serai.initialize_empty_events(block3_hash); + let mut task = test.into_task(); - let mut task = test.into_intend_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - - test.assert_task_iterations_with_no_events_ran((1, 3)); + test.assert_task_iterations_with_no_events_ran(1, 3); } #[tokio::test] async fn intend_errors_if_chain_is_not_linear() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); + test.serai.make_block(1); // Capture builds_upon after block 1 (before block 2 modifies it) let builds_upon_after_block_1 = test.serai.builds_upon.clone(); @@ -431,116 +635,138 @@ async fn intend_errors_if_chain_is_not_linear() { // Block #2 does not build upon block #1 test.serai.builds_upon = IncrementalUnbalancedMerkleTree::new(); - let block2_hash = test.serai.make_block(2); - test.serai.initialize_empty_events(block2_hash); + test.serai.make_block(2); + + let mut task = test.into_task(); + + Test::assert_task_run_and_failed_with(&mut task, "doesn't build upon").await; + + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + + // Now fix the chain: remove the broken block 2 and recreate it properly + test.serai.blocks_by_number.remove(&2); + test.serai.builds_upon = builds_upon_after_block_1; - let mut task = test.into_intend_task(); - Test::assert_task_failed(&mut task, "doesn't build upon").await; + test.serai.make_block(2); + + let mut task = test.into_task(); - test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); + // Re-run the task, block 2 properly builds upon block 1 + Test::assert_task_run_and_check_progress(&mut task, true).await; + // block 1 was already asserted and cleared from queue, assert only block 2 now + test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] async fn intend_errors_if_block_not_found() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - // Block 1 exists and can be fetched - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); + test.serai.make_block(1); - // Capture builds_upon after block 1 (before block 2 modifies it) + // Capture builds_upon after block 1 let builds_upon_after_block_1 = test.serai.builds_upon.clone(); // Block 2 exists in terms of finalization, but returns None when fetched test.serai.make_block(2); test.serai.set_block_not_found(2); - let mut task = test.into_intend_task(); - Test::assert_task_failed(&mut task, "couldn't get block which should've been finalized").await; + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with( + &mut task, + "couldn't get block which should've been finalized", + ) + .await; + + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + + test.serai.missing_blocks.remove(&2); + + let mut task = test.into_task(); - test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); + // Re-run the task, block 2 now fetched and processed + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] async fn intend_handles_rpc_error_on_block_fetch() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - // Block 1 exists and can be fetched - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); + test.serai.make_block(1); - // Capture builds_upon after block 1 (before block 2 modifies it) + // Capture builds_upon after block 1 let builds_upon_after_block_1 = test.serai.builds_upon.clone(); // Block 2 exists in terms of finalization, but fetching it returns an error test.serai.make_block(2); test.serai.set_block_error(2, "connection refused"); - let mut task = test.into_intend_task(); - Test::assert_task_failed(&mut task, "RPC error fetching block").await; + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching block").await; - test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + + test.serai.block_by_number_error.remove(&2); + + let mut task = test.into_task(); + + // Re-run the task, block 2 now fetched and processed + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] async fn intend_handles_rpc_error_on_events_fetch() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - // Block 1 exists and can be fetched - let block1_hash = test.serai.make_block(1); - test.serai.initialize_empty_events(block1_hash); + test.serai.make_block(1); - // Capture builds_upon after block 1 (before block 2 modifies it) + // Capture builds_upon after block 1 let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - // Block 2 exists in terms of finalization, but fetching it returns an error + // Block 2 exists in terms of finalization, but fetching it returns an event error let block2_hash = test.serai.make_block(2); test.serai.set_events_error(block2_hash, "timeout"); - let mut task = test.into_intend_task(); - Test::assert_task_failed(&mut task, "RPC error fetching events").await; + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching events").await; + + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - test.assert_block_1_succeeded_block_2_failed(&builds_upon_after_block_1); + test.serai.events_error.remove(&block2_hash); + + let mut task = test.into_task(); + + // Re-run the task, block 2 now fetched and processed + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] async fn intend_handles_rpc_error_on_latest_finalized() { - let mut test = Test::new(); + let mut test = IntendTest::default(); - // We need to add a block first so latest_finalized_block_number would normally succeed test.serai.make_block(1); + test.serai.set_latest_finalized_error("network error"); - // Create a wrapper that returns error for latest_finalized_block_number - #[derive(Clone)] - struct FailingSeraiRPC; - impl SeraiRpc for FailingSeraiRPC { - fn latest_finalized_block_number(&self) -> impl Send + Future> { - async { Err("network error".to_string()) } - } - fn block_by_number( - &self, - _block: u64, - ) -> impl Send + Future, String>> { - async { Ok(None) } - } - fn events(&self, _block: BlockHash) -> impl Send + Future> { - async { Ok(Events::new()) } - } - } + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching latest finalized").await; + + test.serai.latest_finalized_error = None; - // Create a custom Serai that will fail on latest_finalized_block_number - let mut task = CosignIntendTask { db: test.db.clone(), serai: FailingSeraiRPC }; - Test::assert_task_failed(&mut task, "RPC error fetching latest finalized").await; + let mut task = test.into_task(); + + Test::assert_task_run_and_check_progress(&mut task, true).await; + test.assert_task_iteration_per_block_with_no_events_ran(1); } #[tokio::test] async fn intend_handles_allocation_events() { - let mut test = Test::new(); + let mut test = IntendTest::default(); let validator1 = SeraiAddress([0x01; 32]); let validator2 = SeraiAddress([0x02; 32]); - // Block 1: Allocations across multiple networks let allocations_block1 = [ (validator1, ExternalNetworkId::Bitcoin, 50), (validator1, ExternalNetworkId::Bitcoin, 100), @@ -550,322 +776,164 @@ async fn intend_handles_allocation_events() { let block1_hash = test.serai.make_block(1); test.serai.set_events(block1_hash, events_from_allocations(&allocations_block1)); - // Block 2: More allocations let allocations_block2 = [(validator2, ExternalNetworkId::Ethereum, 75), (validator1, ExternalNetworkId::Bitcoin, 25)]; + let block2_hash = test.serai.make_block(2); test.serai.set_events(block2_hash, events_from_allocations(&allocations_block2)); - let mut task = test.into_intend_task(); + let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; let all_allocations: Vec<_> = allocations_block1.iter().chain(allocations_block2.iter()).copied().collect(); + test.assert_stakes_from_allocations_is_expected(&all_allocations); - // Both blocks have only allocation events (no SetKeys, no burn) -> HasEvents::No - test.assert_task_iterations_with_no_events_ran((1, 2)); + // Both blocks have only allocation events which are a HasEvents::No + // not HasEvents::Notable neither HasEvents::NonNotable + test.assert_task_iterations_with_no_events_ran(1, 2); } #[tokio::test] -async fn intend_handles_deallocation_event() { - let mut test = Test::new(); +async fn intend_handles_allocation_events_overflow() { + let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); - // Block 1: Allocate then deallocate some let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - // Allocate first - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - // Deallocate some - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), - ], - ); + let allocations_block1 = [(validator, ExternalNetworkId::Bitcoin, u64::MAX)]; + test.serai.set_events(block1_hash, events_from_allocations(&allocations_block1)); + + // Capture builds_upon after block 1 + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - // Block 2: Deallocate more than remaining to test saturation + // Block 2: Allocate more u64::MAX amount - should cause overflow error let block2_hash = test.serai.make_block(2); test.serai.set_events( block2_hash, - vec![ - // Deallocate more than remaining (70 left, deallocating 100) - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - ], + vec![allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), u64::MAX)], ); - // Create task after all blocks are set up - let mut task = test.into_intend_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + let mut task = test.into_task(); - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(0))); + Test::assert_task_run_and_failed_with(&mut task, "stake overflow").await; - // Both blocks have only allocation/deallocation events (no SetKeys, no burn) -> HasEvents::No - test.assert_task_iterations_with_no_events_ran((1, 2)); + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(u64::MAX))); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + + // Stake still stores the values from block 1, before the failure + let all_allocations: Vec<_> = allocations_block1.iter().copied().collect(); + test.assert_stakes_from_allocations_is_expected(&all_allocations); } #[tokio::test] -async fn intend_errors_if_notable_block_has_no_stake() { - let mut test = Test::new(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); +async fn intend_handles_allocation_events_ignore_serai_network() { + let mut test = IntendTest::default(); - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + let validator = SeraiAddress([0x01; 32]); - // Block 1: Normal notable block with allocations let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - set_decided_event(vset0, validator1), - set_keys_event(set0, 1), - ], - ); + // Block 1: Allocation with NetworkId::Serai + test.serai.set_events(block1_hash, vec![allocation_event(validator, NetworkId::Serai, 100)]); - // Block 2: SetDecided and SetKeys for new session with validator2 who has no allocations -> 0 stake - let block2_hash = test.serai.make_block(2); - test - .serai - .set_events(block2_hash, vec![set_decided_event(vset1, validator2), set_keys_event(set1, 2)]); + let mut task = test.into_task(); + Test::assert_task_run_and_check_progress(&mut task, true).await; - let mut task = test.into_intend_task(); - Test::assert_task_failed(&mut task, "had 0 stake").await; + // Verify no stakes were recorded for Serai network (allocations were ignored) + // Stakes::get only works with ExternalNetworkId, so we can't directly check Serai + // But we can verify the blocks were processed with no notable events + test.assert_task_iteration_per_block_with_no_events_ran(1); } #[tokio::test] -async fn intend_handles_set_decided_event() { - let mut test = Test::new(); +async fn intend_handles_deallocation_without_prior_allocation() { + let mut test = IntendTest::default(); - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - let validator3 = SeraiAddress([0x03; 32]); - - let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0_btc = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let vset0_eth = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + let validator = SeraiAddress([0x01; 32]); - // Block 1: SetDecided for Bitcoin + // Block 1: Deallocate without any prior allocation should error let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, - vec![Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset0_btc, - validators: vec![ - (validator1, KeyShares(1)), - (validator2, KeyShares(2)), - (validator3, KeyShares(3)), - ], - })], + vec![deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100)], ); - // Block 2: SetDecided for Ethereum with different validators - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset0_eth, - validators: vec![(validator1, KeyShares(2)), (validator2, KeyShares(3))], - })], - ); + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "no prior existing stake").await; - let mut task = test.into_intend_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; - - // Verify validators are stored for each set - test.assert_validators_is_expected(set0_btc, Some(vec![validator1, validator2, validator3])); - test.assert_validators_is_expected(set0_eth, Some(vec![validator1, validator2])); - - // SetDecided alone doesn't make a block notable (only SetKeys does) -> HasEvents::No - test.assert_task_iterations_with_no_events_ran((1, 2)); + // No stakes should be recorded since the operation failed + test.assert_global_db_is_clear_after_block(1); } #[tokio::test] -async fn intend_handles_set_keys_event() { - let mut test = Test::new(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); +async fn intend_handles_deallocation_event() { + let mut test = IntendTest::default(); - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + let validator = SeraiAddress([0x01; 32]); - // Block 1: First SetKeys (creates session 0) let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, vec![ - allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - allocation_event(validator2, NetworkId::External(ExternalNetworkId::Bitcoin), 200), - Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset0, - validators: vec![(validator1, KeyShares(1)), (validator2, KeyShares(2))], - }), - set_keys_event(set0, 1), + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), ], ); - // Block 2: Second SetKeys (creates session 1) + // Capture builds_upon after block 1 + let builds_upon_after_block_1 = test.serai.builds_upon.clone(); + let block2_hash = test.serai.make_block(2); test.serai.set_events( block2_hash, vec![ - Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset1, - validators: vec![(validator1, KeyShares(2)), (validator2, KeyShares(1))], - }), - set_keys_event(set1, 2), + // Deallocate more than remaining (70 left, deallocating 100) should cause underflow error + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), ], ); - let mut task = test.into_intend_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; - - let expected_set = Set { session: Session(1), key: Public([2u8; 32]), stake: Amount(300) }; - test.assert_latest_set_is_expected(ExternalNetworkId::Bitcoin, Some(&expected_set)); - - test.assert_validators_is_expected(set0, None); - test.assert_validators_is_expected(set1, None); - - // Block 1: First notable block (no prior session) -> HasEvents::No - let (session0_id, _) = test.assert_task_iteration_per_block_with_notable_events_ran(1, None); - - // Block 2: Second notable block (prior session exists) -> HasEvents::Notable - test.assert_task_iteration_per_block_with_notable_events_ran(2, Some(session0_id)); - - test.assert_scan_cosign_from_is_expected(3); -} - -#[tokio::test] -async fn intend_handles_burn_with_instruction_events() { - let mut test = Test::new(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - - // Block 1: Create a session (first notable block, treated as No because no prior session) - let allocations_block1 = - [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; - let block1_hash = test.serai.make_block(1); - let mut events = events_from_allocations(&allocations_block1); - events.push(Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset0, - validators: vec![(validator1, KeyShares(1)), (validator2, KeyShares(2))], - })); - events.push(set_keys_event(set0, 1)); - test.serai.set_events(block1_hash, events); - - // Block 2: Burn event makes block NonNotable (with additional allocations) - let allocations_block2 = [(validator1, ExternalNetworkId::Bitcoin, 50)]; - let block2_hash = test.serai.make_block(2); - let mut events2 = events_from_allocations(&allocations_block2); - events2.push(burn_with_instruction_event(validator1)); - test.serai.set_events(block2_hash, events2); - - let mut task = test.into_intend_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; - - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator1, Some(Amount(150))); - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator2, Some(Amount(200))); - - // Block 1: First notable block (no prior session, treated as No) - test.assert_task_iteration_per_block(1); - test - .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + // Create task after all blocks are set up + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; - // Block 2: NonNotable (has burn event, session exists from block 1) - test.assert_task_iteration_per_block_with_non_notable_events_ran(2); + // Verify block 1 was processed successfully before the error on block 2 + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(70))); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); } #[tokio::test] -async fn intend_ignores_non_validator_sets_events() { - let mut test = Test::new(); +async fn intend_handles_deallocation_underflow_error() { + let mut test = IntendTest::default(); - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + let validator = SeraiAddress([0x01; 32]); - // Block 1: System event (outer _ => continue) and AcceptedHandover (inner _ => continue) let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, vec![ - Event::System(system::Event::TransactionSuccess), - Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset0 }), - ], - ); - - // Block 2: More ignored events on different network - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![ - Event::System(system::Event::TransactionSuccess), - Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset1 }), + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 50), + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 200), ], ); - let mut task = test.into_intend_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; - // Both blocks have only ignored events -> HasEvents::No - test.assert_task_iterations_with_no_events_ran((1, 2)); + test.assert_global_db_is_clear_after_block(1); } #[tokio::test] -async fn intend_ignores_serai_network_events() { - let mut test = Test::new(); +async fn intend_handles_deallocation_events_ignore_serai_network() { + let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); - let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(0) }; - - // Block 1: Allocation and Deallocation with NetworkId::Serai let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - // Allocation with Serai network -> continue (line 154) - allocation_event(validator, NetworkId::Serai, 100), - // Deallocation with Serai network -> continue (line 159) - deallocation_event(validator, NetworkId::Serai, 50), - ], - ); + test.serai.set_events(block1_hash, vec![deallocation_event(validator, NetworkId::Serai, 100)]); - // Block 2: SetDecided with NetworkId::Serai - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![Event::ValidatorSets(validator_sets::Event::SetDecided { - set: vset_serai, - validators: vec![(validator, KeyShares(1))], - })], - ); - - let mut task = test.into_intend_task(); + let mut task = test.into_task(); Test::assert_task_run_and_check_progress(&mut task, true).await; - // Verify no stakes were recorded for Serai network (allocations were ignored) - // Stakes::get only works with ExternalNetworkId, so we can't directly check Serai - // But we can verify the blocks were processed with no notable events - test.assert_task_iterations_with_no_events_ran((1, 2)); + test.assert_task_iteration_per_block_with_no_events_ran(1); } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index ba3645b7d..75f0b67e2 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -1,6 +1,9 @@ #[cfg(test)] mod intend; +#[cfg(test)] +mod evaluator; + #[cfg(test)] mod delay; @@ -15,7 +18,7 @@ impl Test { assert_eq!(task.run_iteration().await.unwrap(), made_progress); } - pub(crate) async fn assert_task_failed_with(task: &mut impl ContinuallyRan, error: &str) { + pub(crate) async fn assert_task_run_and_failed_with(task: &mut impl ContinuallyRan, error: &str) { let err = task.run_iteration().await.unwrap_err(); let err_str = format!("{err:?}"); assert!(err_str.contains(error), "{err_str}"); From 8bac6ffccd8d58a95fd9122a3ac30607940347d8 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 30 Dec 2025 17:55:58 -0300 Subject: [PATCH 10/71] fix: test failures --- coordinator/cosign/src/tests/intend.rs | 2 +- substrate/abi/src/modules/validator_sets.rs | 2 ++ substrate/primitives/src/lib.rs | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 78c442f58..627c4a120 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -46,7 +46,7 @@ fn set_keys_event(set: ExternalValidatorSet) -> Event { fn set_decided_event(set: ValidatorSet, validator: SeraiAddress) -> Event { Event::ValidatorSets(validator_sets::Event::SetDecided { set, - validators: vec![(validator, KeyShares(1))], + validators: vec![(validator, KeyShares::ONE)], }) } diff --git a/substrate/abi/src/modules/validator_sets.rs b/substrate/abi/src/modules/validator_sets.rs index 056ed1e4a..e53338c64 100644 --- a/substrate/abi/src/modules/validator_sets.rs +++ b/substrate/abi/src/modules/validator_sets.rs @@ -11,6 +11,8 @@ use serai_primitives::{ validator_sets::*, }; +pub use serai_primitives::validator_sets::DeallocationTimeline; + /// A call to the validator sets module. #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum Call { diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 72b36d2d0..12b56fe17 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -75,7 +75,7 @@ impl From for BlockNumber { level so this is fine for our use-case. If we do ever see a 64-byte block hash, we can simply hash it into a 32-byte hash or truncate it. */ -#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] pub struct BlockHash(pub [u8; 32]); #[cfg(feature = "scale")] crate::borsh_as_scale!(BlockHash); From f49fc0596a16d787d309051cde6a9c24491c83e2 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 2 Jan 2026 18:35:36 -0300 Subject: [PATCH 11/71] feat(cosign): more tests --- coordinator/cosign/src/delay.rs | 32 +- coordinator/cosign/src/evaluator.rs | 4 + coordinator/cosign/src/intend.rs | 17 +- coordinator/cosign/src/lib.rs | 1 + coordinator/cosign/src/tests/cosigning.rs | 118 +++++ coordinator/cosign/src/tests/delay.rs | 110 ++-- coordinator/cosign/src/tests/evaluator.rs | 46 +- coordinator/cosign/src/tests/intend.rs | 580 +++++++++++++++++++--- coordinator/cosign/src/tests/mod.rs | 67 ++- coordinator/cosign/src/tests/types.rs | 115 +++++ 10 files changed, 909 insertions(+), 181 deletions(-) create mode 100644 coordinator/cosign/src/tests/cosigning.rs create mode 100644 coordinator/cosign/src/tests/types.rs diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index ffa829bce..f587e08c4 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -7,8 +7,15 @@ use serai_task::ContinuallyRan; use crate::evaluator::CosignedBlocks; /// How often callers should broadcast the cosigns flagged for rebroadcasting. +#[cfg(not(test))] pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60); +#[cfg(not(test))] const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); +/// How often callers should broadcast the cosigns flagged for rebroadcasting. +#[cfg(test)] +pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(6); +#[cfg(test)] +const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(1); pub(crate) const ACKNOWLEDGEMENT_DELAY: Duration = Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs()); @@ -37,17 +44,16 @@ impl ContinuallyRan for CosignDelayTask { loop { let mut txn = self.db.txn(); - // Every loop iteration consumes a CosignedBlocks queue message, if successful - let cosigned_block = CosignedBlocks::try_recv(&mut txn); - let Some((block_number, time_evaluated)) = cosigned_block else { + // Peek before consuming + let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { // Queue was empty -> nothing to commit drop(txn); - // Stop when no blocks in queue break; }; if block_number == 0u64 { + // Clear block from queue txn.commit(); continue; } @@ -55,25 +61,27 @@ impl ContinuallyRan for CosignDelayTask { // If we've already acknowledged a later block, consume and skip (don't wait). let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); if block_number <= already_cosigned { + // Clear block from queue txn.commit(); continue; } // Calculate when we should mark it as valid, checking for overflow to avoid panic - let time_evaluated_duration = Duration::from_secs(time_evaluated); - let time_valid = - time_evaluated_duration.checked_add(ACKNOWLEDGEMENT_DELAY).ok_or_else(|| { + let time_valid = Duration::from_secs(time_evaluated) + .checked_add(ACKNOWLEDGEMENT_DELAY) + .ok_or_else(|| { format!( - "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY", + "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY" ) })?; let now = now_timestamp(); - // If the time valid is greater than the current time, sleep until the time valid is reached if time_valid > now { - // db txn being held and not committed until sleep completed - // if a timeout occurs, this block will be restarted on the next iteration - tokio::time::sleep(time_valid.saturating_sub(now)).await; + // NOT READY YET - don't consume, just return + // leave message in queue, check again in next task iteration + // simulates sleeping until ready, but continually iterating until ready instead + drop(txn); + return Ok(made_progress); } LatestCosignedBlockNumber::set(&mut txn, &block_number); diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index b61e805b2..6e43328af 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -9,7 +9,11 @@ use crate::{ intend::{GlobalSessionsChannel, BlockEventData, BlockEvents}, }; +#[cfg(not(test))] pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60); +#[cfg(test)] +pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(6); + const COSIGN_COMMIT_THRESHOLD: u64 = 83; create_db!( diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index a757331ce..832a24271 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -165,16 +165,19 @@ impl ContinuallyRan for CosignIntendTask { } validator_sets::Event::SetDecided { set, validators } => { let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; - Validators::set( - &mut txn, - set, - &validators.iter().map(|(validator, _key_shares)| *validator).collect(), - ); + if validators.len() > 0 { + Validators::set( + &mut txn, + set, + &validators.iter().map(|(validator, _key_shares)| *validator).collect(), + ); + } } validator_sets::Event::SetKeys { set, key_pair } => { has_events = HasEvents::Notable; let validators = Validators::take(&mut txn, *set) + .filter(|v| !v.is_empty()) .ok_or_else(|| "set which wasn't decided set keys".to_string())?; let mut stake = 0; @@ -223,7 +226,7 @@ impl ContinuallyRan for CosignIntendTask { .ok_or_else(|| format!("total stake overflow: {} + {}", total_stake, stake.0))?; } if total_stake == 0 { - // commit only per block finished otherwise reset progress + // commit only per block finished otherwise reset db progress drop(txn); return Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; } @@ -289,7 +292,7 @@ impl ContinuallyRan for CosignIntendTask { // Mark this block as handled, meaning we should scan from the next block moving on ScanCosignFrom::set(&mut txn, &(block_number + 1)); - // All-or-nothing, commit only per block finished otherwise reset progress + // All-or-nothing, commit only per block finished otherwise reset db progress // avoids partially adding db entries without committing the full expected db additions // i.e. saving a SubstrateBlockHash initially but later failing mid-way txn.commit(); diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index e77edb73d..10488cb7d 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -225,6 +225,7 @@ pub struct Cosigning { _task_handles: Vec, } impl Cosigning { + #[cfg(test)] /// Create a cosigning handle using an already-initialized database. /// /// This does not spawn any background tasks; use `Cosigning::spawn` for the full service. diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs new file mode 100644 index 000000000..eeb55e204 --- /dev/null +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -0,0 +1,118 @@ +use std::{collections::HashMap, time::Instant}; + +use borsh::{BorshDeserialize, BorshSerialize}; + +use blake2::{Blake2s256, Digest}; + +use serai_cosign_types::COSIGN_CONTEXT; +use serai_db::{Db as _, DbTxn, MemDb}; + +use serai_client_serai::abi::primitives::{ + BlockHash, + crypto::Public, + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, +}; + +use crate::{ + BROADCAST_FREQUENCY, Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, + GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, + NetworksLatestCosignedBlock, SeraiRpc, SignedCosign, SubstrateBlockHash, + delay::LatestCosignedBlockNumber, + evaluator::CurrentlyEvaluatedGlobalSession, + tests::{TestRequest, intend::Serai, sign_cosign, sr25519_fixture}, +}; + +use crate::intend::IntendedCosigns; + +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +struct TestGlobalSession { + start_block_number: u64, + sets: Vec, + keys: HashMap, + stakes: HashMap, + total_stake: u64, +} +impl TestGlobalSession { + fn id(&self) -> [u8; 32] { + let mut sets = self.sets.clone(); + sets.sort_by_key(|a| borsh::to_vec(a).unwrap()); + Blake2s256::digest(borsh::to_vec(&sets).unwrap()).into() + } + + fn to_global(&self) -> GlobalSession { + GlobalSession { + start_block_number: self.start_block_number, + sets: self.sets.clone(), + keys: self.keys.clone(), + stakes: self.stakes.clone(), + total_stake: self.total_stake, + } + } +} + +fn session_fixture() -> TestGlobalSession { + let network = ExternalNetworkId::Bitcoin; + let set = ExternalValidatorSet { network, session: Session(0) }; + + let mut keys = HashMap::new(); + let mut stakes = HashMap::new(); + + let keypair = sr25519_fixture(); + let pubkey = Public(keypair.public.to_bytes()); + keys.insert(network, pubkey); + stakes.insert(network, 100); + + TestGlobalSession { start_block_number: 1, sets: vec![set], keys, stakes, total_stake: 100 } +} + +fn seed_minimal_state(db: &mut MemDb, session: &TestGlobalSession) { + let mut txn = db.txn(); + let id = session.id(); + + // Required by `Cosigning::intake_cosign`. + GlobalSessions::set(&mut txn, id, &session.to_global()); + + // Required by `Cosigning::cosigns_to_rebroadcast` in the non-faulted case. + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + + // Required for `intake_cosign` to not classify a session as "future". + LatestCosignedBlockNumber::set(&mut txn, &0u64); + + txn.commit(); +} + +#[test] +fn global_session_id_generation() { + let network1 = ExternalNetworkId::Bitcoin; + let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; + let set2 = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + + // Create two vectors with the same sets but in different order + let cosigners1 = vec![set1, set2]; + let cosigners2 = vec![set2, set1]; + + // Both should produce the same ID (order-independent) + let id1 = GlobalSession::id(cosigners1.clone()); + let id2 = GlobalSession::id(cosigners2); + assert_eq!(id1, id2, "IDs should be the same regardless of input order"); + + // Same input should always produce the same ID (deterministic) + let id3 = GlobalSession::id(cosigners1.clone()); + assert_eq!(id1, id3, "same input should produce the same ID"); + + // Different sets should produce different IDs + let set3 = ExternalValidatorSet { network: network1, session: Session(1) }; // same network as set1, different session + assert_ne!( + GlobalSession::id(vec![set1]), + GlobalSession::id(vec![set3]), + "different validator sets should produce different IDs" + ); +} + +#[test] +fn cosigns_to_rebroadcast_empty_without_state() { + let db = MemDb::new(); + let cosigning = Cosigning::new(db); + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); +} diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index feac6b701..85273daff 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -8,12 +8,16 @@ use crate::{ }; use serai_db::{Db as _, DbTxn as _, MemDb}; -use serai_task::ContinuallyRan; fn now_secs() -> u64 { now_timestamp().as_secs() } +fn past_timestamp() -> u64 { + // A timestamp old enough that time_valid is already passed + now_secs().saturating_sub(2 * ACKNOWLEDGEMENT_DELAY.as_secs()) +} + struct DelayTest { db: MemDb, } @@ -69,7 +73,7 @@ impl DelayTest { async fn delay_task_returns_false_with_no_messages() { let test = DelayTest::default(); let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_task_iteration_db_is_clear(); } @@ -88,7 +92,7 @@ async fn delay_task_returns_false_with_genesis_block() { // let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); // the already_cosigned block number always defaults to 0, so "genesis" // is always considered cosigned, made_progress returns false - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_task_iteration_db_is_clear(); } @@ -100,17 +104,17 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut txn = test.db.txn(); // blocks with the same timestamps // nothing unusual happens, the task follow block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(0u64, now)); - CosignedBlocks::send(&mut txn, &(1u64, now)); - CosignedBlocks::send(&mut txn, &(2u64, now)); + let past = past_timestamp(); + CosignedBlocks::send(&mut txn, &(0u64, past)); + CosignedBlocks::send(&mut txn, &(1u64, past)); + CosignedBlocks::send(&mut txn, &(2u64, past)); txn.commit(); } let mut task = test.into_task(); // returns made_progress as true - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; // confirmed the last block as 2 test.assert_task_iteration_completes_with(2u64); @@ -120,15 +124,15 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut txn = test.db.txn(); // timestamps out of order // nothing unusual happens, the task stil follows block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(3u64, now)); - CosignedBlocks::send(&mut txn, &(4u64, now - 1)); - CosignedBlocks::send(&mut txn, &(5u64, now - 2)); + let past = past_timestamp(); + CosignedBlocks::send(&mut txn, &(3u64, past)); + CosignedBlocks::send(&mut txn, &(4u64, past - 1)); + CosignedBlocks::send(&mut txn, &(5u64, past - 2)); txn.commit(); } let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_completes_with(5u64); // Test with increasing timestamps (all in the past, so they process immediately) @@ -138,15 +142,15 @@ async fn delay_task_updates_latest_cosigned_block_number() { let mut txn = test.db.txn(); // timestamps increasing in order // nothing unusual happens, the task stil follows block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(6u64, now)); - CosignedBlocks::send(&mut txn, &(7u64, now + 1)); - CosignedBlocks::send(&mut txn, &(8u64, now + 2)); + let past = past_timestamp(); + CosignedBlocks::send(&mut txn, &(6u64, past)); + CosignedBlocks::send(&mut txn, &(7u64, past + 1)); + CosignedBlocks::send(&mut txn, &(8u64, past + 2)); txn.commit(); } let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_completes_with(8u64); } @@ -156,19 +160,19 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now_secs())); - CosignedBlocks::send(&mut txn, &(2u64, now_secs())); + CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(2u64, past_timestamp())); // Sent out of order below - CosignedBlocks::send(&mut txn, &(4u64, now_secs())); - CosignedBlocks::send(&mut txn, &(3u64, now_secs())); + CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); txn.commit(); } let mut task = test.into_task(); // returns made_progress as true - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; // Queue order: 1, 2, 4, 3 // Block 1, 2 and 4 processed, block 3 skipped @@ -180,7 +184,7 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { { let mut txn = test.db.txn(); // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4u64, now_secs())); + CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); txn.commit(); } @@ -188,22 +192,22 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // No progress was made since the same block number was skipped, // made_progress returns false - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_task_iteration_completes_with(4u64); { let mut txn = test.db.txn(); // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4u64, now_secs())); + CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); // This time ensure progress is made beyond 4 - CosignedBlocks::send(&mut txn, &(5u64, now_secs())); + CosignedBlocks::send(&mut txn, &(5u64, past_timestamp())); txn.commit(); } let mut task = test.into_task(); // Had a duplicate, but made 1 block worth of progress // made_progress returns true - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; // confirmed the last block as 5 test.assert_task_iteration_completes_with(5u64); } @@ -219,22 +223,18 @@ async fn delay_task_does_not_ack_before_acknowledgement_delay() { } let mut task = test.into_task(); - let handle = tokio::spawn(async move { task.run_iteration().await.unwrap() }); - // nothing is returned - test.assert_latest_cosigned_block_number_is_expected(None); - - // Sleep for most of (but not all) the acknowledgement delay - tokio::time::sleep(ACKNOWLEDGEMENT_DELAY - Duration::from_secs(1)).await; + // First iteration returns early - not ready yet + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - // still nothing is returned test.assert_latest_cosigned_block_number_is_expected(None); + test.assert_queue_is_not_empty(); // Message still in queue - // wait for the task to actually complete - let result = handle.await.unwrap(); - assert_eq!(result, true); + // Wait for the delay to pass + tokio::time::sleep(ACKNOWLEDGEMENT_DELAY + Duration::from_secs(1)).await; - // Now confirmed the last block as 1 + // Now iteration should succeed + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_completes_with(1u64); } @@ -253,7 +253,7 @@ async fn delay_task_with_zero_timestamp_processes_immediately() { // This should complete immediately without sleeping // Since 0 as timestamp will always be an older date than the current time as timestamp // and since the ACK time is considered to be passed, there is no sleep time to do - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_completes_with(1u64); } @@ -264,10 +264,10 @@ async fn delay_task_with_max_timestamp_returns_error() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, now_secs())); - CosignedBlocks::send(&mut txn, &(1u64, now_secs())); + CosignedBlocks::send(&mut txn, &(0u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); CosignedBlocks::send(&mut txn, &(2u64, u64::MAX)); - CosignedBlocks::send(&mut txn, &(3u64, now_secs())); + CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); txn.commit(); } @@ -282,33 +282,27 @@ async fn delay_task_with_max_timestamp_returns_error() { } #[tokio::test] -async fn delay_task_with_far_future_timestamp_hangs() { - // A timestamp far in the future (but not MAX to avoid overflow) - // will cause the task to sleep for an extremely long time +async fn delay_task_with_far_future_timestamp_returns_early() { let mut test = DelayTest::default(); { let mut txn = test.db.txn(); - // Use timestamp 0 for blocks 0 and 1 so they process immediately - // (time_valid = 0 + ACKNOWLEDGEMENT_DELAY is already in the past) - CosignedBlocks::send(&mut txn, &(0u64, 0u64)); - CosignedBlocks::send(&mut txn, &(1u64, 0u64)); + CosignedBlocks::send(&mut txn, &(0u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); let far_future = now_secs() + 1_000_000; CosignedBlocks::send(&mut txn, &(2u64, far_future)); - CosignedBlocks::send(&mut txn, &(3u64, 0u64)); - + CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); txn.commit(); } let mut task = test.into_task(); - // Use a timeout to prevent the test from hanging forever - let result = tokio::time::timeout(Duration::from_millis(100), task.run_iteration()).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - assert!(result.is_err(), "Expected timeout, but task completed"); - - // since had a forced timeout 3u64 should still be in queue - test.assert_task_iteration_fails(Some(1u64)); + // It processed blocks 0 and 1, then returned early on block 2 + test.assert_latest_cosigned_block_number_is_expected(Some(1)); + // Block 2,3 stay in queue until the time is valid + test.assert_queue_is_not_empty(); } diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index bd0ea8ba0..8b551ad83 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -20,18 +20,10 @@ use crate::{ CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, }, intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, - tests::{IntoTask, Test}, + tests::{IntoTask, Test, TestRequest}, }; use crate::RequestNotableCosigns; -use serai_task::{ContinuallyRan}; - -#[derive(Clone)] -pub(crate) struct TestRequest { - pub(crate) calls: Arc, - pub(crate) should_error: bool, -} - pub(crate) struct EvaluatorTest { pub(crate) db: MemDb, } @@ -51,36 +43,6 @@ impl IntoTask for EvaluatorTest { } } -#[derive(Debug)] -pub(crate) struct RequestError; - -impl TestRequest { - pub(crate) fn new(should_error: bool) -> (Self, Arc) { - let calls = Arc::new(AtomicUsize::new(0)); - (Self { calls: calls.clone(), should_error }, calls) - } -} - -impl RequestNotableCosigns for TestRequest { - type Error = RequestError; - - fn request_notable_cosigns( - &self, - _global_session: [u8; 32], - ) -> impl Send + core::future::Future> { - let calls = self.calls.clone(); - let should_error = self.should_error; - async move { - calls.fetch_add(1, Ordering::SeqCst); - if should_error { - Err(RequestError) - } else { - Ok(()) - } - } - } -} - impl EvaluatorTest { fn assert_no_currently_evaluated_global_session(&self) { assert_eq!(CurrentlyEvaluatedGlobalSession::get(&self.db).is_none(), true); @@ -167,7 +129,7 @@ impl EvaluatorTest { async fn evaluator_task_returns_false_with_no_block_events() { let test = EvaluatorTest::default(); let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_evaluator_db_is_clear(); } @@ -182,7 +144,7 @@ async fn evaluator_task_returns_false_with_genesis_block() { } let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; } #[tokio::test] @@ -199,7 +161,7 @@ async fn evaluator_task_processes_blocks_with_no_events() { } let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_completed(1, 2); } diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 627c4a120..2020d0dcf 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -22,7 +22,7 @@ use serai_client_serai::{ network_id::{ExternalNetworkId, NetworkId}, validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, }, - system, validator_sets, + signals, validator_sets, }, }; @@ -39,15 +39,12 @@ use crate::{ fn set_keys_event(set: ExternalValidatorSet) -> Event { Event::ValidatorSets(validator_sets::Event::SetKeys { set, - key_pair: KeyPair(Public([0x01; 32]), ExternalKey(vec![0x02; 32].try_into().unwrap())), + key_pair: KeyPair(Public([0xff; 32]), ExternalKey(vec![0xff; 32].try_into().unwrap())), }) } -fn set_decided_event(set: ValidatorSet, validator: SeraiAddress) -> Event { - Event::ValidatorSets(validator_sets::Event::SetDecided { - set, - validators: vec![(validator, KeyShares::ONE)], - }) +fn set_decided_event(set: ValidatorSet, validators: Vec<(SeraiAddress, KeyShares)>) -> Event { + Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) } fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { @@ -114,10 +111,6 @@ impl Default for Serai { } impl Serai { - pub(crate) fn new() -> Self { - Self::default() - } - pub(crate) fn set_latest_finalized_error(&mut self, error: &str) { self.latest_finalized_error = Some(error.to_string()); } @@ -218,7 +211,7 @@ pub(crate) struct IntendTest { impl Default for IntendTest { fn default() -> Self { - Self { serai: Serai::new(), db: MemDb::new() } + Self { serai: Serai::default(), db: MemDb::default() } } } @@ -587,15 +580,15 @@ impl IntendTest { } #[tokio::test] -async fn intend_returns_false_with_no_blocks() { +async fn intend_task_returns_false_with_no_blocks() { let test = IntendTest::default(); let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_global_db_is_clear(); } #[tokio::test] -async fn intend_returns_false_with_genesis_block() { +async fn intend_task_returns_false_with_genesis_block() { let mut test = IntendTest::default(); test.serai.make_block(0); @@ -605,12 +598,12 @@ async fn intend_returns_false_with_genesis_block() { // In intend.rs let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); // will always default to the 1st block, and without a greater serai.latest_finalized_block_number() // there will nothing to iterate, returning false as in "did not progress" - Test::assert_task_run_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; test.assert_global_db_is_clear_after_block(0u64); } #[tokio::test] -async fn intend_returns_true_with_linear_blocks() { +async fn intend_task_returns_true_with_linear_blocks() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -619,12 +612,12 @@ async fn intend_returns_true_with_linear_blocks() { let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iterations_with_no_events_ran(1, 3); } #[tokio::test] -async fn intend_errors_if_chain_is_not_linear() { +async fn intend_task_errors_if_chain_is_not_linear() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -652,13 +645,13 @@ async fn intend_errors_if_chain_is_not_linear() { let mut task = test.into_task(); // Re-run the task, block 2 properly builds upon block 1 - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; // block 1 was already asserted and cleared from queue, assert only block 2 now test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] -async fn intend_errors_if_block_not_found() { +async fn intend_task_errors_if_block_not_found() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -684,12 +677,12 @@ async fn intend_errors_if_block_not_found() { let mut task = test.into_task(); // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] -async fn intend_handles_rpc_error_on_block_fetch() { +async fn intend_task_handles_rpc_error_on_block_fetch() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -711,12 +704,12 @@ async fn intend_handles_rpc_error_on_block_fetch() { let mut task = test.into_task(); // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] -async fn intend_handles_rpc_error_on_events_fetch() { +async fn intend_task_handles_rpc_error_on_events_fetch() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -738,12 +731,12 @@ async fn intend_handles_rpc_error_on_events_fetch() { let mut task = test.into_task(); // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_per_block_with_no_events_ran(2); } #[tokio::test] -async fn intend_handles_rpc_error_on_latest_finalized() { +async fn intend_task_handles_rpc_error_on_latest_finalized() { let mut test = IntendTest::default(); test.serai.make_block(1); @@ -756,12 +749,12 @@ async fn intend_handles_rpc_error_on_latest_finalized() { let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; test.assert_task_iteration_per_block_with_no_events_ran(1); } #[tokio::test] -async fn intend_handles_allocation_events() { +async fn intend_task_handles_allocation_events() { let mut test = IntendTest::default(); let validator1 = SeraiAddress([0x01; 32]); @@ -783,7 +776,7 @@ async fn intend_handles_allocation_events() { test.serai.set_events(block2_hash, events_from_allocations(&allocations_block2)); let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; let all_allocations: Vec<_> = allocations_block1.iter().chain(allocations_block2.iter()).copied().collect(); @@ -796,7 +789,7 @@ async fn intend_handles_allocation_events() { } #[tokio::test] -async fn intend_handles_allocation_events_overflow() { +async fn intend_task_handles_allocation_events_overflow() { let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); @@ -828,112 +821,577 @@ async fn intend_handles_allocation_events_overflow() { } #[tokio::test] -async fn intend_handles_allocation_events_ignore_serai_network() { +async fn intend_task_handles_deallocation_without_prior_allocation() { + let mut test = IntendTest::default(); + + let validator = SeraiAddress([0x01; 32]); + + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + // Deallocate without any prior allocation should error + vec![deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100)], + ); + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "no prior existing stake").await; + + // No stakes should be recorded since the operation failed + test.assert_global_db_is_clear_after_block(1); +} + +#[tokio::test] +async fn intend_task_handles_deallocation_event() { let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); let block1_hash = test.serai.make_block(1); - // Block 1: Allocation with NetworkId::Serai - test.serai.set_events(block1_hash, vec![allocation_event(validator, NetworkId::Serai, 100)]); + test.serai.set_events( + block1_hash, + vec![ + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), + ], + ); + // Create task after all blocks are set up let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - // Verify no stakes were recorded for Serai network (allocations were ignored) - // Stakes::get only works with ExternalNetworkId, so we can't directly check Serai - // But we can verify the blocks were processed with no notable events + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(70))); test.assert_task_iteration_per_block_with_no_events_ran(1); } #[tokio::test] -async fn intend_handles_deallocation_without_prior_allocation() { +async fn intend_task_handles_deallocation_underflow_error() { let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); - // Block 1: Deallocate without any prior allocation should error let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, - vec![deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100)], + vec![ + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 50), + deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 200), + ], ); let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "no prior existing stake").await; + Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; - // No stakes should be recorded since the operation failed test.assert_global_db_is_clear_after_block(1); } #[tokio::test] -async fn intend_handles_deallocation_event() { +async fn intend_task_handles_set_decided_event_with_empty_validators() { let mut test = IntendTest::default(); - let validator = SeraiAddress([0x01; 32]); + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, vec![set_decided_event(vset0, vec![])]); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + // Verify that an empty validators vec results in no validators being stored + test.assert_validators_is_expected(set0, None); + + // SetDecided is a HasEvents::No type + test.assert_task_iteration_per_block_with_no_events_ran(1); +} + +#[tokio::test] +async fn intend_task_handles_set_decided_event() { + let mut test = IntendTest::default(); + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + let validator3 = SeraiAddress([0x03; 32]); + + let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0_btc = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let vset0_eth = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + + // Block 1: SetDecided for Bitcoin + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![set_decided_event( + vset0_btc, + vec![ + (validator1, KeyShares::ONE), + (validator2, KeyShares::try_from(2).unwrap()), + (validator3, KeyShares::try_from(3).unwrap()), + ], + )], + ); + + // Block 2: SetDecided for Ethereum with different validators + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![set_decided_event( + vset0_eth, + vec![ + (validator1, KeyShares::try_from(2).unwrap()), + (validator2, KeyShares::try_from(3).unwrap()), + ], + )], + ); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + // Verify validators are stored for each set + test.assert_validators_is_expected(set0_btc, Some(vec![validator1, validator2, validator3])); + test.assert_validators_is_expected(set0_eth, Some(vec![validator1, validator2])); + + // SetDecided is a HasEvents::No type, not HasEvents::Notable neither HasEvents::NonNotable + test.assert_task_iterations_with_no_events_ran(1, 2); +} + +#[tokio::test] +async fn intend_task_handles_set_keys_without_set_decided() { + let mut test = IntendTest::default(); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + // Block 1: SetKeys without prior SetDecided should error + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, vec![set_keys_event(set0)]); + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "set which wasn't decided set keys").await; + + // No state should be recorded since the operation failed + test.assert_global_db_is_clear_after_block(1); +} + +#[tokio::test] +async fn intend_task_handles_set_keys_event() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + + // Block 1: First SetKeys (creates session 0) let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), + allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + allocation_event(validator2, NetworkId::External(ExternalNetworkId::Bitcoin), 200), + set_decided_event( + vset0, + vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], + ), + set_keys_event(set0), + ], + ); + + // Block 2: Second SetKeys (creates session 1) + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![ + set_decided_event( + vset1, + vec![(validator1, KeyShares::try_from(2).unwrap()), (validator2, KeyShares::ONE)], + ), + set_keys_event(set1), + ], + ); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + let expected_set = Set { session: Session(1), key: Public([0xff; 32]), stake: Amount(300) }; + test.assert_latest_set_is_expected(ExternalNetworkId::Bitcoin, Some(&expected_set)); + + test.assert_validators_is_expected(set0, None); + test.assert_validators_is_expected(set1, None); + + // Block 1: First notable block (no prior session) -> HasEvents::No + let (session0_id, _) = test.assert_task_iteration_per_block_with_notable_events_ran(1, None); + + // Block 2: Second notable block (prior session exists) -> HasEvents::Notable + test.assert_task_iteration_per_block_with_notable_events_ran(2, Some(session0_id)); + + test.assert_scan_cosign_from_is_expected(3); +} + +#[tokio::test] +async fn intend_task_handles_set_keys_event_error_if_notable_block_has_no_stake() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; + + // Block 1: Normal notable block with allocations + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + set_decided_event(vset0, vec![(validator1, KeyShares::ONE)]), + set_keys_event(set0), + ], + ); + + // Block 2: SetDecided and SetKeys for new session with validator2 who has no allocations -> 0 stake + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![set_decided_event(vset1, vec![(validator2, KeyShares::ONE)]), set_keys_event(set1)], + ); + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "had 0 stake").await; +} + +#[tokio::test] +async fn intend_task_handles_notable_event_errors_with_total_stake_overflow() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0_btc = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let vset0_eth = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + + // Block 1: Allocate near-max stake to validator1 on Bitcoin + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + allocation_event( + validator1, + NetworkId::External(ExternalNetworkId::Bitcoin), + u64::MAX - 1000, + ), + set_decided_event(vset0_btc, vec![(validator1, KeyShares::ONE)]), + set_keys_event(set0_btc), ], ); // Capture builds_upon after block 1 let builds_upon_after_block_1 = test.serai.builds_upon.clone(); + // Block 2: Allocate more stake on Ethereum - this should cause total_stake overflow let block2_hash = test.serai.make_block(2); test.serai.set_events( block2_hash, vec![ - // Deallocate more than remaining (70 left, deallocating 100) should cause underflow error - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), + allocation_event(validator2, NetworkId::External(ExternalNetworkId::Ethereum), 2000), + set_decided_event(vset0_eth, vec![(validator2, KeyShares::ONE)]), + set_keys_event(set0_eth), ], ); - // Create task after all blocks are set up let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; + + // Run should fail on block 2 due to total_stake overflow (after successfully processing block 1) + Test::assert_task_run_and_failed_with(&mut task, "total stake overflow").await; // Verify block 1 was processed successfully before the error on block 2 - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(70))); + test.assert_stakes_is_expected( + ExternalNetworkId::Bitcoin, + validator1, + Some(Amount(u64::MAX - 1000)), + ); + test.assert_latest_set_is_expected( + ExternalNetworkId::Bitcoin, + Some(&Set { session: Session(0), key: Public([0xff; 32]), stake: Amount(u64::MAX - 1000) }), + ); test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); } #[tokio::test] -async fn intend_handles_deallocation_underflow_error() { +async fn intend_task_handles_burn_with_instruction_events() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + // Block 1: Create a session (first notable block, treated as No because no prior session) + let allocations_block1 = + [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; + let block1_hash = test.serai.make_block(1); + let mut events = events_from_allocations(&allocations_block1); + events.push(set_decided_event( + vset0, + vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], + )); + events.push(set_keys_event(set0)); + test.serai.set_events(block1_hash, events); + + // Block 2: Burn event makes block NonNotable (with additional allocations) + let allocations_block2 = [(validator1, ExternalNetworkId::Bitcoin, 50)]; + let block2_hash = test.serai.make_block(2); + let mut events2 = events_from_allocations(&allocations_block2); + events2.push(burn_with_instruction_event(validator1)); + test.serai.set_events(block2_hash, events2); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator1, Some(Amount(150))); + test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator2, Some(Amount(200))); + + // Block 1: First notable block (no prior session, treated as No) + test.assert_task_iteration_per_block(1); + test + .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + + // Block 2: NonNotable (has burn event, session exists from block 1) + test.assert_task_iteration_per_block_with_non_notable_events_ran(2); +} + +#[tokio::test] +async fn intend_task_handles_ignore_non_validator_sets_events() { + let mut test = IntendTest::default(); + + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let vset1 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; + + // Block 1: Signals event (outer _ => continue) and AcceptedHandover (inner _ => continue) + let block1_hash = test.serai.make_block(1); + test.serai.set_events( + block1_hash, + vec![ + Event::Signals(signals::Event::NetworkHalted { network: ExternalNetworkId::Bitcoin }), + Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset0 }), + ], + ); + + // Block 2: More ignored events on different network + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![ + Event::Signals(signals::Event::NetworkHalted { network: ExternalNetworkId::Ethereum }), + Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset1 }), + ], + ); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + // Both blocks have only ignored events -> HasEvents::No + test.assert_task_iterations_with_no_events_ran(1, 2); +} + +#[tokio::test] +async fn intend_task_handles_ignore_non_burn_with_instruction_coins_events() { let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); + // Block 1: Mint and Transfer events (should be ignored) let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 50), - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 200), + Event::Coins(coins::Event::Mint { + to: validator, + coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(100) }, + }), + Event::Coins(coins::Event::Transfer { + from: validator, + to: SeraiAddress([0x02; 32]), + coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(50) }, + }), ], ); + // Block 2: Burn event (not BurnWithInstruction, should be ignored) + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![Event::Coins(coins::Event::Burn { + from: validator, + coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(50) }, + })], + ); + let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_global_db_is_clear_after_block(1); + // All Coins events except BurnWithInstruction are ignored -> HasEvents::No + test.assert_task_iterations_with_no_events_ran(1, 2); } #[tokio::test] -async fn intend_handles_deallocation_events_ignore_serai_network() { +async fn intend_task_handles_ignores_serai_network_events() { let mut test = IntendTest::default(); let validator = SeraiAddress([0x01; 32]); + let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(0) }; + let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![deallocation_event(validator, NetworkId::Serai, 100)]); + test.serai.set_events( + block1_hash, + vec![ + allocation_event(validator, NetworkId::Serai, 100), + // Can even try a greater deallocation amount, both will be ignored anyway + deallocation_event(validator, NetworkId::Serai, 150), + ], + ); + + let block2_hash = test.serai.make_block(2); + test.serai.set_events( + block2_hash, + vec![set_decided_event(vset_serai, vec![(validator, KeyShares::ONE)])], + ); let mut task = test.into_task(); - Test::assert_task_run_and_check_progress(&mut task, true).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_per_block_with_no_events_ran(1); + test.assert_task_iterations_with_no_events_ran(1, 2); +} + +#[tokio::test] +async fn intend_task_handles_downgrades_events_when_no_session_available() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator1)]); + + let allocations_block2 = + [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; + let block2_hash = test.serai.make_block(2); + let mut events = events_from_allocations(&allocations_block2); + events.push(set_decided_event( + vset0, + vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], + )); + events.push(set_keys_event(set0)); + test.serai.set_events(block2_hash, events); + + let block3_hash = test.serai.make_block(3); + test.serai.set_events(block3_hash, vec![burn_with_instruction_event(validator2)]); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_task_iteration_per_block(1); + test + .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + + test.assert_task_iteration_per_block(2); + test + .assert_block_events_is_expected(BlockEventData { block_number: 2, has_events: HasEvents::No }); + + test.assert_task_iteration_per_block_with_non_notable_events_ran(3); + + test.assert_scan_cosign_from_is_expected(4); +} + +#[tokio::test] +async fn intend_task_handles_errors_when_global_session_not_in_database() { + use serai_db::Db as _; + + let mut test = IntendTest::default(); + + let validator = SeraiAddress([0x01; 32]); + + let fake_session_id = [0xAB; 32]; + { + let mut txn = test.db.txn(); + LatestGlobalSessionIntended::set(&mut txn, &fake_session_id); + txn.commit(); + } + + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator)]); + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "wasn't saved to the database").await; + + test.assert_no_substrate_block_hash(1); + test.assert_no_scan_cosign_from(); + test.assert_no_block_events(); + + test.assert_latest_global_session_intended(Some(fake_session_id)); +} + +#[tokio::test] +async fn intend_task_handles_safeguard_prevents_cosigning_with_no_session() { + let mut test = IntendTest::default(); + + let validator1 = SeraiAddress([0x01; 32]); + let validator2 = SeraiAddress([0x02; 32]); + + let block1_hash = test.serai.make_block(1); + test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator1)]); + + let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let vset0 = + ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + + let allocations = + [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; + let block2_hash = test.serai.make_block(2); + let mut events = events_from_allocations(&allocations); + events.push(set_decided_event(vset0, vec![(validator1, KeyShares::ONE)])); + events.push(set_keys_event(set0)); + test.serai.set_events(block2_hash, events); + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_task_iteration_per_block(1); + test + .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + + test.assert_task_iteration_per_block(2); + test + .assert_block_events_is_expected(BlockEventData { block_number: 2, has_events: HasEvents::No }); + + assert!( + LatestGlobalSessionIntended::get(&test.db).is_some(), + "session should have been created by block 2" + ); + + test.assert_scan_cosign_from_is_expected(3); } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 75f0b67e2..b198fd635 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -7,11 +7,27 @@ mod evaluator; #[cfg(test)] mod delay; +#[cfg(test)] +mod cosigning; + +#[cfg(test)] +mod types; + +use std::{ + sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }, +}; + +use serai_cosign_types::{COSIGN_CONTEXT, Cosign, SignedCosign}; use serai_task::ContinuallyRan; +use crate::RequestNotableCosigns; + pub(crate) struct Test; impl Test { - pub(crate) async fn assert_task_run_and_check_progress( + pub(crate) async fn assert_task_run_iteration_and_check_progress( task: &mut impl ContinuallyRan, made_progress: bool, ) { @@ -29,3 +45,52 @@ pub(crate) trait IntoTask { type Task: ContinuallyRan + 'static; fn into_task(&self) -> Self::Task; } + +#[derive(Clone)] +pub(crate) struct TestRequest { + pub(crate) calls: Arc, + pub(crate) should_error: bool, +} + +#[derive(Debug)] +pub(crate) struct RequestError; + +impl TestRequest { + pub(crate) fn new(should_error: bool) -> (Self, Arc) { + let calls = Arc::new(AtomicUsize::new(0)); + (Self { calls: calls.clone(), should_error }, calls) + } +} + +impl RequestNotableCosigns for TestRequest { + type Error = RequestError; + + fn request_notable_cosigns( + &self, + _global_session: [u8; 32], + ) -> impl Send + core::future::Future> { + let calls = self.calls.clone(); + let should_error = self.should_error; + async move { + calls.fetch_add(1, Ordering::SeqCst); + if should_error { + Err(RequestError) + } else { + Ok(()) + } + } + } +} + +pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { + schnorrkel::MiniSecretKey::from_bytes(&[0xff; 32]) + .expect("fixed seed should be valid") + .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) +} + +pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { + SignedCosign { + cosign: cosign.clone(), + signature: keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes(), + } +} diff --git a/coordinator/cosign/src/tests/types.rs b/coordinator/cosign/src/tests/types.rs new file mode 100644 index 000000000..dba674c4c --- /dev/null +++ b/coordinator/cosign/src/tests/types.rs @@ -0,0 +1,115 @@ +use crate::{BlockHash, Cosign, CosignIntent, ExternalNetworkId, Public, SignedCosign}; +use crate::tests::{sign_cosign, sr25519_fixture}; + +#[test] +fn cosign_intent_to_cosign() { + let intent = CosignIntent { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + notable: true, + }; + + let cosign = intent.into_cosign(ExternalNetworkId::Bitcoin); + + assert_eq!(cosign.global_session, [1u8; 32]); + assert_eq!(cosign.block_number, 5); + assert_eq!(cosign.block_hash, BlockHash([5u8; 32])); + assert_eq!(cosign.cosigner, ExternalNetworkId::Bitcoin); +} + +#[test] +fn cosign_signature_message() { + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + + let msg1 = cosign.signature_message(); + let msg2 = cosign.signature_message(); + + assert_eq!(msg1, msg2, "signature_message should be deterministic"); +} + +#[test] +fn signed_cosign_verify_signature_valid() { + let keypair = sr25519_fixture(); + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + + let signed = sign_cosign(cosign, &keypair); + let pubkey = Public(keypair.public.to_bytes()); + + assert!(signed.verify_signature(pubkey), "valid signature should verify"); +} + +#[test] +fn signed_cosign_verify_signature_invalid() { + let keypair1 = sr25519_fixture(); + let keypair2 = schnorrkel::MiniSecretKey::from_bytes(&[0x01; 32]) + .unwrap() + .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519); + + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + + let signed = sign_cosign(cosign, &keypair1); + let wrong_pubkey = Public(keypair2.public.to_bytes()); + + assert!(!signed.verify_signature(wrong_pubkey), "invalid signature should not verify"); +} + +#[test] +fn signed_cosign_verify_signature_invalid_public_key_bytes() { + let keypair = sr25519_fixture(); + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + + let signed = sign_cosign(cosign, &keypair); + + let invalid_bytes = [255u8; 32]; + assert!( + schnorrkel::PublicKey::from_bytes(&invalid_bytes).is_err(), + "test precondition: bytes should be invalid for schnorrkel" + ); + + let invalid_pubkey = Public(invalid_bytes); + assert!(!signed.verify_signature(invalid_pubkey), "invalid public key bytes should return false"); +} + +#[test] +fn signed_cosign_verify_signature_invalid_signature_bytes() { + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + + let invalid_sig_bytes = [255u8; 64]; + assert!( + schnorrkel::Signature::from_bytes(&invalid_sig_bytes).is_err(), + "test precondition: signature bytes should be invalid for schnorrkel" + ); + + let signed = SignedCosign { cosign, signature: invalid_sig_bytes }; + + let keypair = sr25519_fixture(); + let valid_pubkey = Public(keypair.public.to_bytes()); + + assert!(!signed.verify_signature(valid_pubkey), "invalid signature bytes should return false"); +} From 722dfd73c69aac99e56ae1e1a605738c37869a15 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 10:52:10 -0300 Subject: [PATCH 12/71] refactor(cosign): simpliy & cleanup delay tests --- coordinator/cosign/src/delay.rs | 42 ++--- coordinator/cosign/src/intend.rs | 56 ++---- coordinator/cosign/src/tests/delay.rs | 243 +++++--------------------- 3 files changed, 73 insertions(+), 268 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index f587e08c4..c658eba9b 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -2,7 +2,7 @@ use core::future::Future; use std::time::{Duration, SystemTime}; use serai_db::*; -use serai_task::ContinuallyRan; +use serai_task::{DoesNotError, ContinuallyRan}; use crate::evaluator::CosignedBlocks; @@ -36,28 +36,20 @@ pub(crate) struct CosignDelayTask { } impl ContinuallyRan for CosignDelayTask { - type Error = String; + type Error = DoesNotError; fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; - loop { let mut txn = self.db.txn(); - // Peek before consuming + // Peek the next block to mark as cosigned, without consuming yet let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { - // Queue was empty -> nothing to commit - drop(txn); + // Queue was empty -> nothing to commit, txn gets dropped break; }; - if block_number == 0u64 { - // Clear block from queue - txn.commit(); - continue; - } - // If we've already acknowledged a later block, consume and skip (don't wait). let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); if block_number <= already_cosigned { @@ -66,27 +58,25 @@ impl ContinuallyRan for CosignDelayTask { continue; } - // Calculate when we should mark it as valid, checking for overflow to avoid panic - let time_valid = Duration::from_secs(time_evaluated) - .checked_add(ACKNOWLEDGEMENT_DELAY) - .ok_or_else(|| { - format!( - "time_evaluated ({time_evaluated}) would overflow when adding ACKNOWLEDGEMENT_DELAY" - ) - })?; + // Calculate when we should mark it as valid + let time_valid = Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; let now = now_timestamp(); + // drop txn during sleep + drop(txn); + if time_valid > now { - // NOT READY YET - don't consume, just return - // leave message in queue, check again in next task iteration - // simulates sleeping until ready, but continually iterating until ready instead - drop(txn); - return Ok(made_progress); + // Sleep until then + let time_left = time_valid - now; + tokio::time::sleep(time_left).await; } + let mut txn = self.db.txn(); + let _consumed_block = CosignedBlocks::try_recv(&mut txn); + // Set the cosigned block LatestCosignedBlockNumber::set(&mut txn, &block_number); - txn.commit(); + made_progress = true; } diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 832a24271..c2417ee49 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -80,11 +80,8 @@ impl ContinuallyRan for CosignIntendTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = self - .serai - .latest_finalized_block_number() - .await - .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; + let latest_block_number = + self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?; if latest_block_number < start_block_number { return Ok(false); @@ -97,14 +94,9 @@ impl ContinuallyRan for CosignIntendTask { .serai .block_by_number(block_number) .await - .map_err(|e| format!("RPC error fetching block #{block_number}: {e}"))? + .map_err(|e| format!("{e}"))? .ok_or_else(|| "couldn't get block which should've been finalized".to_owned())?; - - let events = self - .serai - .events(block.header.hash()) - .await - .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; + let events = self.serai.events(block.header.hash()).await.map_err(|e| format!("{e}"))?; let mut has_events = HasEvents::No; @@ -115,9 +107,8 @@ impl ContinuallyRan for CosignIntendTask { if block.header.builds_upon() != builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG) { - // nothing to commit - drop(txn); - return Err(format!( + // Ephemeral error here, do not txn commit but reset progress + Err(format!( "node's block #{block_number} doesn't build upon the block #{} prior indexed", block_number - 1 ))?; @@ -141,27 +132,12 @@ impl ContinuallyRan for CosignIntendTask { validator_sets::Event::Allocation { validator, network, amount } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - let new_stake = existing.0.checked_add(amount.0).ok_or_else(|| { - format!( - "stake overflow for validator {:?} on network {:?}: {} + {}", - validator, network, existing.0, amount.0 - ) - })?; - Stakes::set(&mut txn, network, *validator, &Amount(new_stake)); + Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0)); } validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - - let existing_stake = Stakes::get(&txn, network, *validator) - .ok_or_else(|| format!("unable to deallocate with no prior existing stake"))?; - - let new_stake = existing_stake.0.checked_sub(amount.0).ok_or_else(|| { - format!( - "stake underflow for validator {:?} on network {:?}: {} - {}", - validator, network, existing_stake.0, amount.0 - ) - })?; - Stakes::set(&mut txn, network, *validator, &Amount(new_stake)); + let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); + Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); } validator_sets::Event::SetDecided { set, validators } => { let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; @@ -216,19 +192,16 @@ impl ContinuallyRan for CosignIntendTask { let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len()); let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len()); let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len()); - let mut total_stake = 0u64; + let mut total_stake = 0; for (set, key, stake) in sets_and_keys_and_stakes { sets.push(set); keys.insert(set.network, key); stakes.insert(set.network, stake.0); - total_stake = total_stake - .checked_add(stake.0) - .ok_or_else(|| format!("total stake overflow: {} + {}", total_stake, stake.0))?; + total_stake += total_stake; } if total_stake == 0 { - // commit only per block finished otherwise reset db progress - drop(txn); - return Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; + // Ephemeral error here, do not txn commit but reset progress + Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; } let global_session_info = GlobalSession { @@ -293,8 +266,7 @@ impl ContinuallyRan for CosignIntendTask { ScanCosignFrom::set(&mut txn, &(block_number + 1)); // All-or-nothing, commit only per block finished otherwise reset db progress - // avoids partially adding db entries without committing the full expected db additions - // i.e. saving a SubstrateBlockHash initially but later failing mid-way + // for ephemeral errors txn.commit(); } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 85273daff..51a894261 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,10 +1,10 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; use crate::{ LatestCosignedBlockNumber, delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, - tests::{IntoTask, Test}, + tests::{IntoTask, Test, wait_until}, }; use serai_db::{Db as _, DbTxn as _, MemDb}; @@ -13,11 +13,6 @@ fn now_secs() -> u64 { now_timestamp().as_secs() } -fn past_timestamp() -> u64 { - // A timestamp old enough that time_valid is already passed - now_secs().saturating_sub(2 * ACKNOWLEDGEMENT_DELAY.as_secs()) -} - struct DelayTest { db: MemDb, } @@ -37,35 +32,17 @@ impl IntoTask for DelayTest { } impl DelayTest { - fn assert_queue_is_empty(&self) { - assert_eq!(CosignedBlocks::peek(&self.db), None); + pub fn new() -> (Self, Instant) { + let start = std::time::Instant::now(); + let _ = env_logger::try_init(); + (Self::default(), start) } - fn assert_queue_is_not_empty(&self) { - assert_eq!(CosignedBlocks::peek(&self.db).is_some(), true); - } + async fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: u64) { + wait_until!(LatestCosignedBlockNumber::get(&self.db) => Some(latest_cosigned_block_number)); - fn assert_latest_cosigned_block_number_is_expected(&self, block_number: Option) { - assert_eq!(LatestCosignedBlockNumber::get(&self.db), block_number); - } - - // Assert everything that changed or should have changed after a task iteration run - fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: u64) { - // Assert LatestCosignedBlockNumber db points to latest block number after task run - self.assert_latest_cosigned_block_number_is_expected(Some(latest_cosigned_block_number)); // Assert CosignedBlocks queue items have been consumed after task run - self.assert_queue_is_empty(); - } - - // Assert nothing was added or remains after dbs are expected to be cleared - fn assert_task_iteration_db_is_clear(&self) { - self.assert_latest_cosigned_block_number_is_expected(None); - self.assert_queue_is_empty(); - } - - fn assert_task_iteration_fails(&self, latest_cosigned_block_number: Option) { - self.assert_latest_cosigned_block_number_is_expected(latest_cosigned_block_number); - self.assert_queue_is_not_empty(); + assert_eq!(CosignedBlocks::peek(&self.db), None); } } @@ -73,85 +50,64 @@ impl DelayTest { async fn delay_task_returns_false_with_no_messages() { let test = DelayTest::default(); let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - test.assert_task_iteration_db_is_clear(); -} -#[tokio::test] -async fn delay_task_returns_false_with_genesis_block() { - let mut test = DelayTest::default(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, now_secs())); - txn.commit(); - } - - let mut task = test.into_task(); - - // let already_cosigned = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); - // the already_cosigned block number always defaults to 0, so "genesis" - // is always considered cosigned, made_progress returns false Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - test.assert_task_iteration_db_is_clear(); + + assert_eq!(LatestCosignedBlockNumber::get(&test.db), None); + assert_eq!(CosignedBlocks::peek(&test.db), None); } #[tokio::test] -async fn delay_task_updates_latest_cosigned_block_number() { - let mut test = DelayTest::default(); +async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { + let (mut test, start) = DelayTest::new(); { let mut txn = test.db.txn(); // blocks with the same timestamps // nothing unusual happens, the task follow block numbers - let past = past_timestamp(); - CosignedBlocks::send(&mut txn, &(0u64, past)); - CosignedBlocks::send(&mut txn, &(1u64, past)); - CosignedBlocks::send(&mut txn, &(2u64, past)); + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(0, now)); + CosignedBlocks::send(&mut txn, &(1, now)); + CosignedBlocks::send(&mut txn, &(2, now)); txn.commit(); } - let mut task = test.into_task(); + let task = test.into_task(); + let handle = Test::spawn_task_continually_running(task, vec![]); - // returns made_progress as true - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - // confirmed the last block as 2 - test.assert_task_iteration_completes_with(2u64); + test.assert_task_iteration_completes_with(2).await; - let mut test = DelayTest::default(); + log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); // timestamps out of order // nothing unusual happens, the task stil follows block numbers - let past = past_timestamp(); - CosignedBlocks::send(&mut txn, &(3u64, past)); - CosignedBlocks::send(&mut txn, &(4u64, past - 1)); - CosignedBlocks::send(&mut txn, &(5u64, past - 2)); + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(3, now)); + CosignedBlocks::send(&mut txn, &(4, now - 1)); + CosignedBlocks::send(&mut txn, &(5, now - 2)); txn.commit(); } - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(5u64); + test.assert_task_iteration_completes_with(5).await; - // Test with increasing timestamps (all in the past, so they process immediately) - let mut test = DelayTest::default(); + log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); // timestamps increasing in order // nothing unusual happens, the task stil follows block numbers - let past = past_timestamp(); - CosignedBlocks::send(&mut txn, &(6u64, past)); - CosignedBlocks::send(&mut txn, &(7u64, past + 1)); - CosignedBlocks::send(&mut txn, &(8u64, past + 2)); + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(6, now)); + CosignedBlocks::send(&mut txn, &(7, now + 1)); + CosignedBlocks::send(&mut txn, &(8, now + 2)); txn.commit(); } - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(8u64); + test.assert_task_iteration_completes_with(8).await; + + log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); } #[tokio::test] @@ -160,12 +116,13 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); - CosignedBlocks::send(&mut txn, &(2u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(1, now_secs())); + CosignedBlocks::send(&mut txn, &(2, now_secs())); // Sent out of order below - CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); - CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(4, now_secs())); + // 3 will be skipped after 4 was processed + CosignedBlocks::send(&mut txn, &(3, now_secs())); txn.commit(); } @@ -174,17 +131,14 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // returns made_progress as true Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - // Queue order: 1, 2, 4, 3 - // Block 1, 2 and 4 processed, block 3 skipped - // This is unlikely to actually happen in practice but it needs to be tested that it does what it is // meant to do, which is that if we've already acknowledged a later block, consume and skip - test.assert_task_iteration_completes_with(4u64); + test.assert_task_iteration_completes_with(4).await; { let mut txn = test.db.txn(); // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); + CosignedBlocks::send(&mut txn, &(4, now_secs())); txn.commit(); } @@ -193,116 +147,5 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // No progress was made since the same block number was skipped, // made_progress returns false Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - test.assert_task_iteration_completes_with(4u64); - - { - let mut txn = test.db.txn(); - // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4u64, past_timestamp())); - // This time ensure progress is made beyond 4 - CosignedBlocks::send(&mut txn, &(5u64, past_timestamp())); - txn.commit(); - } - - let mut task = test.into_task(); - // Had a duplicate, but made 1 block worth of progress - // made_progress returns true - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - // confirmed the last block as 5 - test.assert_task_iteration_completes_with(5u64); -} - -#[tokio::test] -async fn delay_task_does_not_ack_before_acknowledgement_delay() { - let mut test = DelayTest::default(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, now_secs())); - txn.commit(); - } - - let mut task = test.into_task(); - - // First iteration returns early - not ready yet - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - - test.assert_latest_cosigned_block_number_is_expected(None); - test.assert_queue_is_not_empty(); // Message still in queue - - // Wait for the delay to pass - tokio::time::sleep(ACKNOWLEDGEMENT_DELAY + Duration::from_secs(1)).await; - - // Now iteration should succeed - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(1u64); -} - -#[tokio::test] -async fn delay_task_with_zero_timestamp_processes_immediately() { - let mut test = DelayTest::default(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1u64, 0u64)); - txn.commit(); - } - - let mut task = test.into_task(); - - // This should complete immediately without sleeping - // Since 0 as timestamp will always be an older date than the current time as timestamp - // and since the ACK time is considered to be passed, there is no sleep time to do - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - test.assert_task_iteration_completes_with(1u64); -} - -#[tokio::test] -async fn delay_task_with_max_timestamp_returns_error() { - let mut test = DelayTest::default(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, past_timestamp())); - CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); - CosignedBlocks::send(&mut txn, &(2u64, u64::MAX)); - CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); - txn.commit(); - } - - let mut task = test.into_task(); - - // When timestamp is u64::MAX, adding ACKNOWLEDGEMENT_DELAY would overflow - // The task should return an error instead of panicking - Test::assert_task_run_and_failed_with(&mut task, "overflow").await; - - // since returned an error 3u64 should still be in queue - test.assert_task_iteration_fails(Some(1u64)); -} - -#[tokio::test] -async fn delay_task_with_far_future_timestamp_returns_early() { - let mut test = DelayTest::default(); - - { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(0u64, past_timestamp())); - CosignedBlocks::send(&mut txn, &(1u64, past_timestamp())); - - let far_future = now_secs() + 1_000_000; - CosignedBlocks::send(&mut txn, &(2u64, far_future)); - - CosignedBlocks::send(&mut txn, &(3u64, past_timestamp())); - txn.commit(); - } - - let mut task = test.into_task(); - - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - // It processed blocks 0 and 1, then returned early on block 2 - test.assert_latest_cosigned_block_number_is_expected(Some(1)); - // Block 2,3 stay in queue until the time is valid - test.assert_queue_is_not_empty(); + test.assert_task_iteration_completes_with(4).await; } From 7071732b3682b24cffab6f85f0b5f42bea86ae97 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 11:41:09 -0300 Subject: [PATCH 13/71] feat(cosign): types tests --- coordinator/cosign/types/src/lib.rs | 4 ++++ .../tests/types.rs => types/src/tests/mod.rs} | 16 ++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) rename coordinator/cosign/{src/tests/types.rs => types/src/tests/mod.rs} (85%) diff --git a/coordinator/cosign/types/src/lib.rs b/coordinator/cosign/types/src/lib.rs index 8ef592d7d..55c02903d 100644 --- a/coordinator/cosign/types/src/lib.rs +++ b/coordinator/cosign/types/src/lib.rs @@ -5,6 +5,10 @@ use borsh::{BorshSerialize, BorshDeserialize}; use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId}; +#[cfg(test)] +/// Test helpers and fixtures. +pub mod tests; + /// The schnorrkel context to used when signing a cosign. pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign"; diff --git a/coordinator/cosign/src/tests/types.rs b/coordinator/cosign/types/src/tests/mod.rs similarity index 85% rename from coordinator/cosign/src/tests/types.rs rename to coordinator/cosign/types/src/tests/mod.rs index dba674c4c..aa01dcc9c 100644 --- a/coordinator/cosign/src/tests/types.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -1,5 +1,17 @@ -use crate::{BlockHash, Cosign, CosignIntent, ExternalNetworkId, Public, SignedCosign}; -use crate::tests::{sign_cosign, sr25519_fixture}; +use crate::{BlockHash, COSIGN_CONTEXT, Cosign, CosignIntent, ExternalNetworkId, Public, SignedCosign}; + +pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { + schnorrkel::MiniSecretKey::from_bytes(&[0xff; 32]) + .expect("fixed seed should be valid") + .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) +} + +pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { + SignedCosign { + cosign: cosign.clone(), + signature: keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes(), + } +} #[test] fn cosign_intent_to_cosign() { From 2f143666ed3c617d16da436472d3a68311fde86c Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:11:41 -0300 Subject: [PATCH 14/71] refactor(cosign): simpliy, cleanup & get tests ready --- coordinator/cosign/Cargo.toml | 12 +- coordinator/cosign/src/evaluator.rs | 53 +- coordinator/cosign/src/intend.rs | 21 +- coordinator/cosign/src/lib.rs | 23 +- coordinator/cosign/src/tests/cosigning.rs | 894 +++++++++++++++++++++- coordinator/cosign/src/tests/delay.rs | 4 +- coordinator/cosign/src/tests/evaluator.rs | 627 ++++++++++++++- coordinator/cosign/src/tests/intend.rs | 123 +-- coordinator/cosign/src/tests/mod.rs | 114 ++- coordinator/cosign/types/Cargo.toml | 4 + coordinator/cosign/types/src/lib.rs | 2 +- coordinator/cosign/types/src/tests/mod.rs | 35 +- coordinator/src/main.rs | 16 +- 13 files changed, 1688 insertions(+), 240 deletions(-) diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index a23ae6e90..86d727738 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -31,15 +31,7 @@ serai-db = { path = "../../common/db", version = "0.1.1" } serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } -schnorrkel = { version = "0.11", default-features = false, features = ["std"], optional = true } -rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"], optional = true } [dev-dependencies] -rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } -schnorrkel = { version = "0.11", default-features = false, features = ["std"] } -serai-substrate-tests = { path = "../../tests/substrate" } -k256 = { version = "0.13", default-features = false, features = ["std", "ecdsa"] } -tokio = { version = "1", default-features = false, features = ["time", "test-util"] } - -[features] -tests = ["schnorrkel", "rand_core"] +env_logger = { version = "0.11", default-features = false } +serai-cosign-types = { path = "./types", features = ["test-helpers"] } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 6e43328af..4e1b14f8d 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -114,20 +114,11 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask { - let mut weight_cosigned = 0u64; - + let mut weight_cosigned = 0; for set in global_session_info.sets { // Check if we have the cosign from this set if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) .map(|signed_cosign| signed_cosign.cosign.block_number) == Some(block_number) { - // Since we have this cosign, add the set's weight to the weight which has cosigned - let stake = global_session_info.stakes.get(&set.network).ok_or_else(|| { - "ValidatorSet in global session yet didn't have its stake".to_owned() - })?; - - weight_cosigned = weight_cosigned - .checked_add(*stake) - .ok_or_else(|| "weight_cosigned overflow".to_owned())?; + // Since have this cosign, add the set's weight to the weight which has cosigned + weight_cosigned += + global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_owned() + })?; } } // Check if the sum weight doesn't cross the required threshold @@ -165,12 +152,10 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask = None; for set in global_session_info.sets { @@ -208,12 +193,10 @@ impl ContinuallyRan for CosignEvaluatorTask= block_number { - let stake = global_session_info.stakes.get(&set.network).ok_or_else(|| { - "ValidatorSet in global session yet didn't have its stake".to_owned() - })?; - weight_cosigned = weight_cosigned - .checked_add(*stake) - .ok_or_else(|| "weight_cosigned overflow".to_owned())?; + weight_cosigned += + global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_owned() + })?; } // Update the lowest block common to all of these cosigns @@ -236,12 +219,10 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignIntendTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = - self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?; + let latest_block_number = self + .serai + .latest_finalized_block_number() + .await + .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; if latest_block_number < start_block_number { return Ok(false); @@ -94,9 +97,14 @@ impl ContinuallyRan for CosignIntendTask { .serai .block_by_number(block_number) .await - .map_err(|e| format!("{e}"))? + .map_err(|e| format!("RPC error fetching block #{block_number}: {e}"))? .ok_or_else(|| "couldn't get block which should've been finalized".to_owned())?; - let events = self.serai.events(block.header.hash()).await.map_err(|e| format!("{e}"))?; + + let events = self + .serai + .events(block.header.hash()) + .await + .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; let mut has_events = HasEvents::No; @@ -136,7 +144,8 @@ impl ContinuallyRan for CosignIntendTask { } validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); + let existing = Stakes::get(&txn, network, *validator) + .expect("unable to deallocate with no prior existing stake"); Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); } validator_sets::Event::SetDecided { set, validators } => { @@ -197,7 +206,7 @@ impl ContinuallyRan for CosignIntendTask { sets.push(set); keys.insert(set.network, key); stakes.insert(set.network, stake.0); - total_stake += total_stake; + total_stake += stake.0; } if total_stake == 0 { // Ephemeral error here, do not txn commit but reset progress diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 10488cb7d..88326b498 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -4,7 +4,11 @@ #![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] use core::{fmt::Debug, future::Future}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; use serai_client_serai::Serai; @@ -41,6 +45,13 @@ use delay::LatestCosignedBlockNumber; /// Test helpers and fixtures. pub mod tests; +/// The interval at which the cosigning loop runs. +#[cfg(not(test))] +pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); +/// The interval at which the cosigning loop runs (shortened for tests). +#[cfg(test)] +pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_millis(10); + /// Abstraction over the Serai RPC client so tests can inject custom behaviour. pub trait SeraiRpc: Clone + Send + Sync + 'static { /// Return the latest finalized block number. @@ -268,7 +279,7 @@ impl Cosigning { /// The latest cosigned block number. pub fn latest_cosigned_block_number(getter: &impl Get) -> Result { if FaultedSession::get(getter).is_some() { - return Err(Faulted)?; + Err(Faulted)?; } Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) @@ -352,7 +363,7 @@ impl Cosigning { NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network) { if existing.cosign.block_number >= cosign.block_number { - return Err(IntakeCosignError::StaleCosign)?; + Err(IntakeCosignError::StaleCosign)?; } } } @@ -364,7 +375,7 @@ impl Cosigning { // Check the cosigned block number is in range to the global session if cosign.block_number < global_session.start_block_number { // Cosign is for a block predating the global session - return Err(IntakeCosignError::BeforeGlobalSessionStart)?; + Err(IntakeCosignError::BeforeGlobalSessionStart)?; } if !faulty { // This prevents a malicious validator set, on the same chain, from producing a cosign after @@ -372,7 +383,7 @@ impl Cosigning { if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) { if cosign.block_number > last_block { // Cosign is for a block after the last block this global session should have signed - return Err(IntakeCosignError::AfterGlobalSessionEnd)?; + Err(IntakeCosignError::AfterGlobalSessionEnd)?; } } } @@ -382,7 +393,7 @@ impl Cosigning { let key = *global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?; if !signed_cosign.verify_signature(key) { - return Err(IntakeCosignError::InvalidSignature)?; + Err(IntakeCosignError::InvalidSignature)?; } } diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index eeb55e204..b3675299d 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,10 +1,12 @@ -use std::{collections::HashMap, time::Instant}; +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; use borsh::{BorshDeserialize, BorshSerialize}; use blake2::{Blake2s256, Digest}; -use serai_cosign_types::COSIGN_CONTEXT; use serai_db::{Db as _, DbTxn, MemDb}; use serai_client_serai::abi::primitives::{ @@ -18,13 +20,46 @@ use crate::{ BROADCAST_FREQUENCY, Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SeraiRpc, SignedCosign, SubstrateBlockHash, - delay::LatestCosignedBlockNumber, + delay::{ACKNOWLEDGEMENT_DELAY, LatestCosignedBlockNumber}, evaluator::CurrentlyEvaluatedGlobalSession, - tests::{TestRequest, intend::Serai, sign_cosign, sr25519_fixture}, + intend::GlobalSessionsChannel, + tests::{TestRequest, intend::Serai}, }; use crate::intend::IntendedCosigns; +use serai_cosign_types::tests::{ + fixture_public_key, public_key_from_seed, sign_cosign_with_fixture, sign_cosign_with_seed, +}; + +const FIXTURE_SEED: [u8; 32] = [0xff; 32]; + +struct Sr25519Fixture { + seed: [u8; 32], +} + +impl Sr25519Fixture { + fn public_bytes(&self) -> [u8; 32] { + if self.seed == FIXTURE_SEED { + fixture_public_key() + } else { + public_key_from_seed(self.seed) + } + } +} + +fn sr25519_fixture() -> Sr25519Fixture { + Sr25519Fixture { seed: FIXTURE_SEED } +} + +fn sign_cosign(cosign: Cosign, fixture: &Sr25519Fixture) -> SignedCosign { + if fixture.seed == FIXTURE_SEED { + sign_cosign_with_fixture(cosign) + } else { + sign_cosign_with_seed(cosign, fixture.seed) + } +} + #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] struct TestGlobalSession { start_block_number: u64, @@ -58,8 +93,8 @@ fn session_fixture() -> TestGlobalSession { let mut keys = HashMap::new(); let mut stakes = HashMap::new(); - let keypair = sr25519_fixture(); - let pubkey = Public(keypair.public.to_bytes()); + let fixture = sr25519_fixture(); + let pubkey = Public(fixture.public_bytes()); keys.insert(network, pubkey); stakes.insert(network, 100); @@ -111,8 +146,851 @@ fn global_session_id_generation() { } #[test] -fn cosigns_to_rebroadcast_empty_without_state() { +fn temporal_returns_true_for_temporal_errors() { + assert!(IntakeCosignError::NotYetIndexedBlock.temporal()); + assert!(IntakeCosignError::StaleCosign.temporal()); + assert!(IntakeCosignError::UnrecognizedGlobalSession.temporal()); + assert!(IntakeCosignError::FutureGlobalSession.temporal()); +} + +#[test] +fn temporal_returns_false_for_non_temporal_errors() { + assert!(!IntakeCosignError::BeforeGlobalSessionStart.temporal()); + assert!(!IntakeCosignError::AfterGlobalSessionEnd.temporal()); + assert!(!IntakeCosignError::NonParticipatingNetwork.temporal()); + assert!(!IntakeCosignError::InvalidSignature.temporal()); +} + +#[tokio::test] +async fn spawn_creates_cosigning_instance() { let db = MemDb::new(); - let cosigning = Cosigning::new(db); + let serai = Serai::default(); + let (request, _calls) = TestRequest::new(false); + let cosigning = Cosigning::spawn(db, serai, request, vec![]); + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); } + +#[tokio::test] +async fn spawn_with_tasks_to_run_upon_cosigning() { + use serai_task::Task; + + let db = MemDb::new(); + let serai = Serai::default(); + let (request, _calls) = TestRequest::new(false); + + let (_task, task_handle) = Task::new(); + let tasks_to_run = vec![task_handle]; + + let cosigning = Cosigning::spawn(db.clone(), serai, request, tasks_to_run); + + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); +} + +#[tokio::test] +async fn spawn_initializes_cosigning_instance_correctly() { + let db = MemDb::new(); + let serai = Serai::default(); + let (request, _calls) = TestRequest::new(false); + + let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); + + let latest = Cosigning::::latest_cosigned_block_number(&db); + assert!(latest.is_ok()); + assert_eq!(latest.unwrap(), 0); +} + +#[tokio::test] +async fn spawn_tasks_chain_correctly() { + let db = MemDb::new(); + let serai = Serai::default(); + let (request, _calls) = TestRequest::new(false); + + let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + tokio::time::sleep(Duration::from_millis(10)).await; + + let latest = Cosigning::::latest_cosigned_block_number(&db); + assert!(latest.is_ok()); +} + +#[test] +fn latest_cosigned_block_number_defaults_to_zero() { + let db = MemDb::new(); + assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 0); +} + +#[test] +fn latest_cosigned_block_number_errors_when_faulted() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &[1u8; 32]); + txn.commit(); + } + assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); +} + +#[test] +fn latest_cosigned_block_number_returns_stored_value() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &42u64); + txn.commit(); + } + assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 42); +} + +#[test] +fn cosigned_block_returns_none_beyond_latest() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &5u64); + txn.commit(); + } + assert_eq!(Cosigning::::cosigned_block(&db, 6).unwrap(), None); +} + +#[test] +fn cosigned_block_returns_hash_when_in_range() { + let mut db = MemDb::new(); + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &5u64); + SubstrateBlockHash::set(&mut txn, 3, &block_hash); + txn.commit(); + } + assert_eq!(Cosigning::::cosigned_block(&db, 3).unwrap(), Some(block_hash)); +} + +#[test] +fn cosigned_block_errors_when_faulted() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &[1u8; 32]); + txn.commit(); + } + assert!(matches!(Cosigning::::cosigned_block(&db, 0), Err(Faulted))); +} + +#[tokio::test] +async fn cosigning_cosigned_block_returns_correct_hash() { + let mut db = MemDb::new(); + let block_hash_5 = BlockHash([42u8; 32]); + let block_hash_10 = BlockHash([43u8; 32]); + + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 5, &block_hash_5); + SubstrateBlockHash::set(&mut txn, 10, &block_hash_10); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + txn.commit(); + } + + let result = Cosigning::::cosigned_block(&db, 5); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some(block_hash_5)); + + let result_10 = Cosigning::::cosigned_block(&db, 10); + assert!(result_10.is_ok()); + assert_eq!(result_10.unwrap(), Some(block_hash_10)); + + let result_11 = Cosigning::::cosigned_block(&db, 11); + assert!(result_11.is_ok()); + assert_eq!(result_11.unwrap(), None); +} + +#[test] +fn notable_cosigns_empty_without_cosigns() { + let db = MemDb::new(); + let cosigns = Cosigning::::notable_cosigns(&db, [1u8; 32]); + assert!(cosigns.is_empty()); +} + +#[test] +fn notable_cosigns_returns_cosigns_for_session() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&signed).unwrap(); + + let notable = Cosigning::::notable_cosigns(&db, id); + assert_eq!(notable.len(), 1); + assert_eq!(notable[0].cosign.block_number, block_number); + assert_eq!(notable[0].cosign.block_hash, block_hash); + assert_eq!(notable[0].cosign.cosigner, ExternalNetworkId::Bitcoin); +} + +#[test] +fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } + + let faulty_cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&faulty_signed).unwrap(); + + let different_session_id = [99u8; 32]; + let different_cosign = Cosign { + global_session: different_session_id, + block_number, + block_hash: our_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let different_signed = sign_cosign(different_cosign, &keypair); + { + let mut txn = db.txn(); + NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &different_signed); + txn.commit(); + } + + let cosigning = Cosigning::new(db); + let rebroadcast = cosigning.cosigns_to_rebroadcast(); + + assert_eq!( + rebroadcast.len(), + 1, + "should only include faults, not cosigns from different sessions" + ); + assert_eq!(rebroadcast[0].cosign.block_hash, faulty_hash); + assert_eq!(rebroadcast[0].cosign.global_session, id); +} + +#[test] +fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&signed).unwrap(); + + let rebroadcast = cosigning.cosigns_to_rebroadcast(); + assert_eq!(rebroadcast.len(), 1); + assert_eq!(rebroadcast[0].cosign.block_number, block_number); + assert_eq!(rebroadcast[0].cosign.block_hash, block_hash); +} + +#[test] +fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } + + let faulty_cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&faulty_signed).unwrap(); + + let honest_cosign = Cosign { + global_session: id, + block_number, + block_hash: our_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let honest_signed = sign_cosign(honest_cosign, &keypair); + { + let mut txn = db.txn(); + NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &honest_signed); + txn.commit(); + } + + let cosigning = Cosigning::new(db); + let rebroadcast = cosigning.cosigns_to_rebroadcast(); + + assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == faulty_hash)); + assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == our_hash)); +} + +#[test] +fn intake_cosign_rejects_not_yet_indexed_block() { + let db = MemDb::new(); + let keypair = sr25519_fixture(); + + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 1, + block_hash: BlockHash([9u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::NotYetIndexedBlock))); +} + +#[test] +fn intake_cosign_accepts_valid_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(cosigning.intake_cosign(&signed).is_ok()); +} + +#[test] +fn intake_cosign_rejects_stale_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 1, &block_hash); + SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); + txn.commit(); + } + + let first_cosign = Cosign { + global_session: id, + block_number: 2, + block_hash: BlockHash([2u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let first_signed = sign_cosign(first_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&first_signed).unwrap(); + + let stale_cosign = Cosign { + global_session: id, + block_number: 1, + block_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let stale_signed = sign_cosign(stale_cosign, &keypair); + + assert!(matches!(cosigning.intake_cosign(&stale_signed), Err(IntakeCosignError::StaleCosign))); +} + +#[test] +fn intake_cosign_rejects_unrecognized_global_session() { + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = Cosign { + global_session: [99u8; 32], + block_number, + block_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::UnrecognizedGlobalSession) + )); +} + +#[test] +fn intake_cosign_rejects_before_global_session_start() { + let mut session = session_fixture(); + session.start_block_number = 10; + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + + SubstrateBlockHash::set(&mut txn, 5, &BlockHash([5u8; 32])); + txn.commit(); + } + + let cosign = Cosign { + global_session: id, + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::BeforeGlobalSessionStart) + )); +} + +#[test] +fn intake_cosign_rejects_after_global_session_end() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + { + let mut txn = db.txn(); + + GlobalSessionsLastBlock::set(&mut txn, id, &5u64); + + SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); + txn.commit(); + } + + let cosign = Cosign { + global_session: id, + block_number: 10, + block_hash: BlockHash([10u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::AfterGlobalSessionEnd) + )); +} + +#[test] +fn intake_cosign_rejects_invalid_signature() { + let session = session_fixture(); + let id = session.id(); + // Use a different keypair than the one in session_fixture + let wrong_keypair = Sr25519Fixture { seed: [99u8; 32] }; + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &wrong_keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::InvalidSignature))); +} + +#[test] +fn intake_cosign_rejects_future_global_session() { + let mut session = session_fixture(); + session.start_block_number = 10; + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + + LatestCosignedBlockNumber::set(&mut txn, &5u64); + SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); + txn.commit(); + } + + let cosign = Cosign { + global_session: id, + block_number: 10, + block_hash: BlockHash([10u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::FutureGlobalSession))); +} + +#[test] +fn intake_cosign_handles_faulty_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } + + let cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + + assert!(cosigning.intake_cosign(&signed).is_ok()); + + let faults: Option> = Faults::get(&db, id); + assert!(faults.is_some()); + assert_eq!(faults.as_ref().unwrap().len(), 1); + assert_eq!(faults.unwrap()[0].cosign.block_hash, faulty_hash); + + let faulted: Option<[u8; 32]> = FaultedSession::get(&db); + assert_eq!(faulted, Some(id)); +} + +#[test] +fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 1, &BlockHash([1u8; 32])); + SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); + txn.commit(); + } + + let first_cosign = Cosign { + global_session: id, + block_number: 1, + block_hash: BlockHash([1u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let first_signed = sign_cosign(first_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&first_signed).unwrap(); + + let newer_cosign = Cosign { + global_session: id, + block_number: 2, + block_hash: BlockHash([2u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let newer_signed = sign_cosign(newer_cosign, &keypair); + + assert!(cosigning.intake_cosign(&newer_signed).is_ok()); + + let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); + assert_eq!(latest.cosign.block_number, 2); +} + +#[test] +fn intake_cosign_accepts_cosign_at_global_session_last_block() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + { + let mut txn = db.txn(); + GlobalSessionsLastBlock::set(&mut txn, id, &5u64); + for i in 1..=5 { + SubstrateBlockHash::set(&mut txn, i, &BlockHash([i as u8; 32])); + } + txn.commit(); + } + + let mut cosigning = Cosigning::new(db.clone()); + + let cosign = Cosign { + global_session: id, + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + assert!(cosigning.intake_cosign(&signed).is_ok()); + + let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); + assert_eq!(latest.cosign.block_number, 5); +} + +#[test] +fn intake_cosign_ignores_duplicate_fault_from_same_network() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash_1 = BlockHash([2u8; 32]); + let faulty_hash_2 = BlockHash([3u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } + + let faulty_cosign_1 = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash_1, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed_1 = sign_cosign(faulty_cosign_1, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + assert!(cosigning.intake_cosign(&faulty_signed_1).is_ok()); + + let faults_after_first = Faults::get(&db, id).unwrap(); + assert_eq!(faults_after_first.len(), 1); + assert_eq!(faults_after_first[0].cosign.block_hash, faulty_hash_1); + + let faulty_cosign_2 = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash_2, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed_2 = sign_cosign(faulty_cosign_2, &keypair); + + assert!(cosigning.intake_cosign(&faulty_signed_2).is_ok()); + + let faults_after_second = Faults::get(&db, id).unwrap(); + assert_eq!(faults_after_second.len(), 1, "duplicate fault from same network should not be added"); + assert_eq!(faults_after_second[0].cosign.block_hash, faulty_hash_1); +} + +#[test] +fn intake_cosign_rejects_non_participating_network() { + let session = session_fixture(); + let id = session.id(); + + let eth_keypair = Sr25519Fixture { seed: [77u8; 32] }; + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } + + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Ethereum }; + let signed = sign_cosign(cosign, ð_keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::NonParticipatingNetwork) + )); +} + +#[test] +fn intake_cosign_records_fault_below_threshold() { + let network1 = ExternalNetworkId::Bitcoin; + let network2 = ExternalNetworkId::Ethereum; + let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; + let set2 = ExternalValidatorSet { network: network2, session: Session(0) }; + + let keypair1 = sr25519_fixture(); + let keypair2 = Sr25519Fixture { seed: [88u8; 32] }; + + let mut keys = HashMap::new(); + let mut stakes = HashMap::new(); + + keys.insert(network1, Public(keypair1.public_bytes())); + keys.insert(network2, Public(keypair2.public_bytes())); + + stakes.insert(network1, 10); + stakes.insert(network2, 90); + + let session = TestGlobalSession { + start_block_number: 1, + sets: vec![set1, set2], + keys, + stakes, + total_stake: 100, + }; + let id = session.id(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } + + let faulty_cosign = + Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network1 }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair1); + + let mut cosigning = Cosigning::new(db.clone()); + assert!(cosigning.intake_cosign(&faulty_signed).is_ok()); + + let faults = Faults::get(&db, id).unwrap(); + assert_eq!(faults.len(), 1); + assert_eq!(faults[0].cosign.block_hash, faulty_hash); + + let faulted = FaultedSession::get(&db); + assert_eq!(faulted, None, "session should not be faulted when weight is below 17% threshold"); +} + +#[test] +fn intended_cosigns_empty_returns_empty() { + let mut db = MemDb::new(); + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let mut txn = db.txn(); + assert!(Cosigning::::intended_cosigns(&mut txn, set).is_empty()); + txn.commit(); +} + +#[test] +fn intended_cosigns_receives_sent_intent() { + let mut db = MemDb::new(); + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let intent = CosignIntent { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + notable: true, + }; + + { + let mut txn = db.txn(); + IntendedCosigns::send(&mut txn, set, &intent); + txn.commit(); + } + + { + let mut txn = db.txn(); + let got = Cosigning::::intended_cosigns(&mut txn, set); + txn.commit(); + assert_eq!(got.len(), 1); + assert_eq!(got[0].global_session, intent.global_session); + assert_eq!(got[0].block_number, intent.block_number); + assert_eq!(got[0].block_hash, intent.block_hash); + assert!(got[0].notable); + } +} diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 51a894261..e5200607b 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -2,7 +2,7 @@ use std::time::{Duration, Instant}; use crate::{ LatestCosignedBlockNumber, - delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, + delay::{CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, tests::{IntoTask, Test, wait_until}, }; @@ -73,7 +73,7 @@ async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { } let task = test.into_task(); - let handle = Test::spawn_task_continually_running(task, vec![]); + let _handle = Test::spawn_task_continually_running(task, vec![]); test.assert_task_iteration_completes_with(2).await; diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index 8b551ad83..e6cbddb31 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -1,13 +1,11 @@ use std::{ collections::HashMap, - sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, - }, + sync::atomic::Ordering, time::{Duration, Instant}, }; use serai_db::{DbTxn, Db as _, MemDb}; +use serai_task::ContinuallyRan; use serai_client_serai::abi::primitives::{ crypto::Public, validator_sets::{ExternalValidatorSet, Session}, @@ -22,7 +20,6 @@ use crate::{ intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, tests::{IntoTask, Test, TestRequest}, }; -use crate::RequestNotableCosigns; pub(crate) struct EvaluatorTest { pub(crate) db: MemDb, @@ -95,12 +92,6 @@ impl EvaluatorTest { self.assert_cosigned_blocks_range(start_block, end_block); } - fn assert_task_iteration_failed_at(&mut self, block_number: u64) { - self.assert_no_global_sessions_channel(); - self.assert_has_block_events(); - self.assert_cosigned_blocks_range(block_number - 1, block_number - 1); - } - const GLOBAL_SESSION: [u8; 32] = [1u8; 32]; /// Initializes a global session with the hardcoded test ID and the given start block number. @@ -134,21 +125,25 @@ async fn evaluator_task_returns_false_with_no_block_events() { } #[tokio::test] -async fn evaluator_task_returns_false_with_genesis_block() { +async fn evaluator_task_processes_blocks_with_no_events() { let mut test = EvaluatorTest::default(); + test.init_global_session(0); { let mut txn = test.db.txn(); BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); txn.commit(); } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + test.assert_task_iteration_completed(0, 2); } #[tokio::test] -async fn evaluator_task_processes_blocks_with_no_events() { +async fn evaluator_task_errors_on_notable_events_without_cosign() { let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -156,17 +151,92 @@ async fn evaluator_task_processes_blocks_with_no_events() { let mut txn = test.db.txn(); BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); txn.commit(); } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_completed(1, 2); + Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + // When iteration fails, nothing is committed - block events are consumed but CosignedBlocks is empty + test.assert_no_global_sessions_channel(); + test.assert_has_block_events(); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task: CosignEvaluatorTask = test.into_task().into(); + task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); + + Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + test.assert_no_global_sessions_channel(); + test.assert_has_block_events(); +} + +fn signed_cosign( + global_session: [u8; 32], + cosigner: ExternalNetworkId, + block_number: u64, +) -> SignedCosign { + SignedCosign { + cosign: Cosign { global_session, block_number, block_hash: BlockHash([0u8; 32]), cosigner }, + signature: [0u8; 64], + } } #[tokio::test] -async fn evaluator_task_errors_on_notable_events_without_cosign() { +async fn evaluator_task_errors_on_notable_events_without_stakes() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + + let stakes = HashMap::new(); + + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; +} + +#[tokio::test] +async fn evaluator_task_errors_on_non_notable_events_without_cosign() { let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -176,7 +246,7 @@ async fn evaluator_task_errors_on_notable_events_without_cosign() { BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); BlockEvents::send( &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, ); BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); txn.commit(); @@ -184,14 +254,16 @@ async fn evaluator_task_errors_on_notable_events_without_cosign() { let mut task = test.into_task(); Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; - test.assert_task_iteration_failed_at(2); + // When iteration fails, nothing is committed + test.assert_no_global_sessions_channel(); + test.assert_has_block_events(); { let mut txn = test.db.txn(); BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); BlockEvents::send( &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, ); txn.commit(); } @@ -200,5 +272,516 @@ async fn evaluator_task_errors_on_notable_events_without_cosign() { task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; - // test.assert_task_iteration_failed_at(2); +} + +#[tokio::test] +async fn evaluator_task_errors_on_request_notable_cosigns_failure() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let (request, calls) = TestRequest::new(true); + let mut task = CosignEvaluatorTask { + db: test.db.clone(), + request, + last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), + }; + + Test::assert_task_run_and_failed_with(&mut task, "RequestError").await; + assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); +} + +#[tokio::test] +async fn evaluator_task_errors_on_request_non_notable_cosigns_failure() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let (request, calls) = TestRequest::new(true); + let mut task = CosignEvaluatorTask { + db: test.db.clone(), + request, + last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), + }; + + Test::assert_task_run_and_failed_with(&mut task, "RequestError").await; + assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); +} + +#[tokio::test] +async fn evaluator_task_processes_notable_events_when_cosigned() { + let mut test = EvaluatorTest::default(); + let global_session = test.init_global_session(0); + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + let (block_number, _time) = CosignedBlocks::peek(&test.db).expect("expected cosigned block"); + assert_eq!(block_number, 1); +} + +#[tokio::test] +async fn evaluator_task_non_notable_uses_cached_known_cosign() { + let mut test = EvaluatorTest::default(); + let global_session = test.init_global_session(0); + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 10), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 3, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + // All three blocks should be marked as cosigned + test.assert_cosigned_blocks_range(1, 3); +} + +#[tokio::test] +async fn evaluator_task_non_notable_with_cosign_returns_some() { + let mut test = EvaluatorTest::default(); + let global_session = test.init_global_session(0); + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_cosigned_blocks_range(1, 1); +} + +#[tokio::test] +async fn evaluator_task_non_notable_cosign_too_low_does_not_add_weight() { + let mut test = EvaluatorTest::default(); + let global_session = test.init_global_session(0); + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 5, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; +} + +#[tokio::test] +async fn evaluator_task_errors_on_non_notable_events_without_stakes() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + + let stakes = HashMap::new(); + + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; +} + +#[tokio::test] +async fn evaluator_task_non_notable_computes_lowest_common_block() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let sets = vec![ + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, + ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, + ]; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 50u64); + stakes.insert(ExternalNetworkId::Ethereum, 50u64); + + let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: 100u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 10), + ); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Ethereum, + &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 3, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_cosigned_blocks_range(1, 3); +} + +#[tokio::test] +#[should_panic(expected = "candidate's start block number exceeds our block number")] +async fn evaluator_task_panics_when_session_starts_after_block() { + let mut test = EvaluatorTest::default(); + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + + let info = + GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + let _ = task.run_iteration().await; +} + +#[tokio::test] +#[should_panic(expected = "currently_evaluated_global_session_strict wasn't called incrementally")] +async fn evaluator_task_panics_when_called_non_incrementally() { + let mut test = EvaluatorTest::default(); + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &([1u8; 32], info)); + txn.commit(); + } + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + let info = + GlobalSession { start_block_number: 5, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &([2u8; 32], info)); + txn.commit(); + } + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 10, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + let _ = task.run_iteration().await; +} + +#[tokio::test] +async fn evaluator_task_advances_global_session_at_start_block() { + let mut test = EvaluatorTest::default(); + + let session1 = [1u8; 32]; + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(session1, info)); + txn.commit(); + } + + let session2 = [2u8; 32]; + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + let info = + GlobalSession { start_block_number: 3, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(session2, info)); + txn.commit(); + } + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + + test.assert_cosigned_blocks_range(1, 3); + + let current = + CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); + assert_eq!(current.0, session2, "should have transitioned to session 2"); + assert_eq!(current.1.start_block_number, 3, "session 2 should start at block 3"); +} + +#[tokio::test] +#[should_panic(expected = "attempt to add with overflow")] +async fn evaluator_task_errors_on_weight_overflow_notable() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let sets = vec![ + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, + ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, + ]; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + + let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Ethereum, + &signed_cosign(global_session, ExternalNetworkId::Ethereum, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; +} + +#[tokio::test] +#[should_panic(expected = "attempt to add with overflow")] +async fn evaluator_task_errors_on_weight_overflow_non_notable() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let sets = vec![ + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, + ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, + ]; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + + let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + ); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Ethereum, + &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; +} + +#[tokio::test] +async fn evaluator_task_errors_when_no_global_session_in_channel() { + let mut test = EvaluatorTest::default(); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + Test::assert_task_run_and_failed_with(&mut task, "but none declared in channel yet").await; + + test.assert_no_currently_evaluated_global_session(); + test.assert_no_cosigned_blocks(); + test.assert_no_global_sessions_channel(); } diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 2020d0dcf..50467da1a 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -6,6 +6,7 @@ use std::{ use blake2::{Blake2b256, Digest}; use serai_db::{Db as _, DbTxn, MemDb}; +use serai_task::ContinuallyRan; use serai_client_serai::{ Events, @@ -789,38 +790,7 @@ async fn intend_task_handles_allocation_events() { } #[tokio::test] -async fn intend_task_handles_allocation_events_overflow() { - let mut test = IntendTest::default(); - - let validator = SeraiAddress([0x01; 32]); - - let block1_hash = test.serai.make_block(1); - let allocations_block1 = [(validator, ExternalNetworkId::Bitcoin, u64::MAX)]; - test.serai.set_events(block1_hash, events_from_allocations(&allocations_block1)); - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block 2: Allocate more u64::MAX amount - should cause overflow error - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), u64::MAX)], - ); - - let mut task = test.into_task(); - - Test::assert_task_run_and_failed_with(&mut task, "stake overflow").await; - - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(u64::MAX))); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - // Stake still stores the values from block 1, before the failure - let all_allocations: Vec<_> = allocations_block1.iter().copied().collect(); - test.assert_stakes_from_allocations_is_expected(&all_allocations); -} - -#[tokio::test] +#[should_panic(expected = "no prior existing stake")] async fn intend_task_handles_deallocation_without_prior_allocation() { let mut test = IntendTest::default(); @@ -829,15 +799,12 @@ async fn intend_task_handles_deallocation_without_prior_allocation() { let block1_hash = test.serai.make_block(1); test.serai.set_events( block1_hash, - // Deallocate without any prior allocation should error + // Deallocate without any prior allocation should panic vec![deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100)], ); let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "no prior existing stake").await; - - // No stakes should be recorded since the operation failed - test.assert_global_db_is_clear_after_block(1); + task.run_iteration().await.unwrap(); } #[tokio::test] @@ -863,27 +830,6 @@ async fn intend_task_handles_deallocation_event() { test.assert_task_iteration_per_block_with_no_events_ran(1); } -#[tokio::test] -async fn intend_task_handles_deallocation_underflow_error() { - let mut test = IntendTest::default(); - - let validator = SeraiAddress([0x01; 32]); - - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 50), - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 200), - ], - ); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "stake underflow").await; - - test.assert_global_db_is_clear_after_block(1); -} - #[tokio::test] async fn intend_task_handles_set_decided_event_with_empty_validators() { let mut test = IntendTest::default(); @@ -1071,67 +1017,6 @@ async fn intend_task_handles_set_keys_event_error_if_notable_block_has_no_stake( Test::assert_task_run_and_failed_with(&mut task, "had 0 stake").await; } -#[tokio::test] -async fn intend_task_handles_notable_event_errors_with_total_stake_overflow() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0_btc = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let vset0_eth = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; - - // Block 1: Allocate near-max stake to validator1 on Bitcoin - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event( - validator1, - NetworkId::External(ExternalNetworkId::Bitcoin), - u64::MAX - 1000, - ), - set_decided_event(vset0_btc, vec![(validator1, KeyShares::ONE)]), - set_keys_event(set0_btc), - ], - ); - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block 2: Allocate more stake on Ethereum - this should cause total_stake overflow - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![ - allocation_event(validator2, NetworkId::External(ExternalNetworkId::Ethereum), 2000), - set_decided_event(vset0_eth, vec![(validator2, KeyShares::ONE)]), - set_keys_event(set0_eth), - ], - ); - - let mut task = test.into_task(); - - // Run should fail on block 2 due to total_stake overflow (after successfully processing block 1) - Test::assert_task_run_and_failed_with(&mut task, "total stake overflow").await; - - // Verify block 1 was processed successfully before the error on block 2 - test.assert_stakes_is_expected( - ExternalNetworkId::Bitcoin, - validator1, - Some(Amount(u64::MAX - 1000)), - ); - test.assert_latest_set_is_expected( - ExternalNetworkId::Bitcoin, - Some(&Set { session: Session(0), key: Public([0xff; 32]), stake: Amount(u64::MAX - 1000) }), - ); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); -} - #[tokio::test] async fn intend_task_handles_burn_with_instruction_events() { let mut test = IntendTest::default(); diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index b198fd635..da257d3cd 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -10,9 +10,6 @@ mod delay; #[cfg(test)] mod cosigning; -#[cfg(test)] -mod types; - use std::{ sync::{ Arc, @@ -20,11 +17,92 @@ use std::{ }, }; -use serai_cosign_types::{COSIGN_CONTEXT, Cosign, SignedCosign}; -use serai_task::ContinuallyRan; +use serai_task::{ContinuallyRan, Task, TaskHandle}; use crate::RequestNotableCosigns; +/// Waits until a condition is met, with a timeout. +/// +/// Polls the condition at `interval` and panics if `timeout` is exceeded. +/// +/// # Examples +/// ```ignore +/// // Simple condition (no value printed on timeout) +/// wait_until!(some_condition()); +/// +/// // With comparison - prints actual value on timeout +/// wait_until!(LatestCosignedBlockNumber::get(&db) => Some(3)); +/// +/// // With custom timeout +/// wait_until!(value_expr => expected, Duration::from_secs(30)); +/// ``` +#[allow(unused_macro_rules)] +macro_rules! wait_until { + // Simple condition without value printing + ($condition:expr) => { + wait_until!(@simple $condition, Duration::from_secs(60), Duration::from_millis(10)) + }; + ($condition:expr, $timeout:expr) => { + wait_until!(@simple $condition, $timeout, Duration::from_millis(10)) + }; + ($condition:expr, $timeout:expr, $interval:expr) => { + wait_until!(@simple $condition, $timeout, $interval) + }; + // Comparison form: wait_until!(actual_expr => expected_value) + // Prints actual value on timeout + ($actual:expr => $expected:expr) => { + wait_until!(@compare $actual, $expected, Duration::from_secs(60), Duration::from_millis(10)) + }; + ($actual:expr => $expected:expr, $timeout:expr) => { + wait_until!(@compare $actual, $expected, $timeout, Duration::from_millis(10)) + }; + ($actual:expr => $expected:expr, $timeout:expr, $interval:expr) => { + wait_until!(@compare $actual, $expected, $timeout, $interval) + }; + // Internal: simple condition + (@simple $condition:expr, $timeout:expr, $interval:expr) => { + tokio::select! { + _ = async { + loop { + if $condition { + break; + } + tokio::time::sleep($interval).await; + } + } => {} + _ = tokio::time::sleep($timeout) => { + panic!("timeout waiting for condition: {}", stringify!($condition)); + } + } + }; + // Internal: comparison with value printing + (@compare $actual:expr, $expected:expr, $timeout:expr, $interval:expr) => {{ + let expected = $expected; + let mut last_actual = None; + tokio::select! { + _ = async { + loop { + let actual = $actual; + if actual == expected { + break; + } + last_actual = Some(actual); + tokio::time::sleep($interval).await; + } + } => {} + _ = tokio::time::sleep($timeout) => { + panic!( + "timeout waiting for {} to equal {:?}, last value was {:?}", + stringify!($actual), + expected, + last_actual + ); + } + } + }}; +} +pub(crate) use wait_until; + pub(crate) struct Test; impl Test { pub(crate) async fn assert_task_run_iteration_and_check_progress( @@ -39,6 +117,19 @@ impl Test { let err_str = format!("{err:?}"); assert!(err_str.contains(error), "{err_str}"); } + + /// Spawns a task to run continuously in the background, returning its handle. + /// + /// This allows testing a task while it runs as expected (with the full `continually_run` + /// loop including delays and error handling). Drop the returned `TaskHandle` to stop the task. + pub fn spawn_task_continually_running( + task_runner: T, + dependents: Vec, + ) -> TaskHandle { + let (task, task_handle) = Task::new(); + tokio::spawn(task_runner.continually_run(task, dependents)); + task_handle + } } pub(crate) trait IntoTask { @@ -81,16 +172,3 @@ impl RequestNotableCosigns for TestRequest { } } } - -pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { - schnorrkel::MiniSecretKey::from_bytes(&[0xff; 32]) - .expect("fixed seed should be valid") - .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) -} - -pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { - SignedCosign { - cosign: cosign.clone(), - signature: keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes(), - } -} diff --git a/coordinator/cosign/types/Cargo.toml b/coordinator/cosign/types/Cargo.toml index d0e51f12f..1285c9c48 100644 --- a/coordinator/cosign/types/Cargo.toml +++ b/coordinator/cosign/types/Cargo.toml @@ -17,6 +17,10 @@ rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true +[features] +default = [] +test-helpers = [] + [dependencies] schnorrkel = { version = "0.11", default-features = false, features = ["std"] } diff --git a/coordinator/cosign/types/src/lib.rs b/coordinator/cosign/types/src/lib.rs index 55c02903d..01499eba3 100644 --- a/coordinator/cosign/types/src/lib.rs +++ b/coordinator/cosign/types/src/lib.rs @@ -5,7 +5,7 @@ use borsh::{BorshSerialize, BorshDeserialize}; use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId}; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] /// Test helpers and fixtures. pub mod tests; diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index aa01dcc9c..8a34de432 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -1,18 +1,47 @@ -use crate::{BlockHash, COSIGN_CONTEXT, Cosign, CosignIntent, ExternalNetworkId, Public, SignedCosign}; +use crate::{COSIGN_CONTEXT, Cosign, SignedCosign}; -pub(crate) fn sr25519_fixture() -> schnorrkel::Keypair { +#[cfg(test)] +use crate::{BlockHash, CosignIntent, ExternalNetworkId, Public}; + +fn sr25519_fixture() -> schnorrkel::Keypair { schnorrkel::MiniSecretKey::from_bytes(&[0xff; 32]) .expect("fixed seed should be valid") .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) } -pub(crate) fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { +fn sr25519_fixture_from_seed(seed: [u8; 32]) -> schnorrkel::Keypair { + schnorrkel::MiniSecretKey::from_bytes(&seed) + .expect("seed should be valid") + .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) +} + +fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { SignedCosign { cosign: cosign.clone(), signature: keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes(), } } +/// Returns the public key bytes from the test fixture keypair (seed [0xff; 32]) +pub fn fixture_public_key() -> [u8; 32] { + sr25519_fixture().public.to_bytes() +} + +/// Returns the public key bytes for a keypair with the given seed +pub fn public_key_from_seed(seed: [u8; 32]) -> [u8; 32] { + sr25519_fixture_from_seed(seed).public.to_bytes() +} + +/// Creates a SignedCosign using the test fixture keypair (seed [0xff; 32]) +pub fn sign_cosign_with_fixture(cosign: Cosign) -> SignedCosign { + sign_cosign(cosign, &sr25519_fixture()) +} + +/// Creates a SignedCosign using a keypair derived from the given seed +pub fn sign_cosign_with_seed(cosign: Cosign, seed: [u8; 32]) -> SignedCosign { + sign_cosign(cosign, &sr25519_fixture_from_seed(seed)) +} + #[test] fn cosign_intent_to_cosign() { let intent = CosignIntent { diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 98e454e3b..5bb171a4a 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -30,7 +30,7 @@ use message_queue::{Service, client::MessageQueue}; use serai_task::{Task, TaskHandle, ContinuallyRan as _}; -use serai_cosign::{Faulted, SignedCosign, Cosigning}; +use serai_cosign::{COSIGN_LOOP_INTERVAL, Faulted, SignedCosign, Cosigning}; use serai_coordinator_substrate::{ CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches, PublishBatchTask, SlashReports, PublishSlashReportTask, @@ -88,8 +88,6 @@ fn spawn_cosigning( ) { let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning); tokio::spawn(async move { - const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); - let last_cosign_rebroadcast = Instant::now(); loop { // Intake our own cosigns @@ -133,8 +131,8 @@ fn spawn_cosigning( } } - let time_till_cosign_rebroadcast = (last_cosign_rebroadcast + - serai_cosign::BROADCAST_FREQUENCY) + let time_till_cosign_rebroadcast = (last_cosign_rebroadcast + + serai_cosign::BROADCAST_FREQUENCY) .saturating_duration_since(Instant::now()); tokio::select! { () = tokio::time::sleep(time_till_cosign_rebroadcast) => { @@ -381,8 +379,8 @@ async fn main() { // Remove retired Tributaries from ActiveTributaries let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]); active_tributaries.retain(|tributary| { - RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) < - Some(tributary.set.session.0) + RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) + < Some(tributary.set.session.0) }); ActiveTributaries::set(&mut txn, &active_tributaries); @@ -407,8 +405,8 @@ async fn main() { let mut key_bytes = serai_key.to_bytes(); // Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces let mut expanded_key = Zeroizing::new([0; 64]); - expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes); - OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]); + expanded_key.as_mut_slice()[..32].copy_from_slice(&key_bytes); + OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32..]); key_bytes.zeroize(); Zeroizing::new( schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(), From 490647ee4b4e25794254919ffc147bdcb23579b1 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:24:30 -0300 Subject: [PATCH 15/71] chore(cosign): merge issues --- Cargo.lock | 32 +++++++++++++++++------ coordinator/cosign/src/tests/cosigning.rs | 17 +++++------- coordinator/cosign/src/tests/intend.rs | 8 +++--- 3 files changed, 34 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bedded819..831b28bc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2627,6 +2627,15 @@ dependencies = [ "syn 2.0.113", ] +[[package]] +name = "env_filter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" +dependencies = [ + "log", +] + [[package]] name = "env_logger" version = "0.10.2" @@ -2637,6 +2646,16 @@ dependencies = [ "log", ] +[[package]] +name = "env_logger" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "environmental" version = "1.1.4" @@ -8097,7 +8116,7 @@ dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group 0.5.0", "dkg-musig", - "env_logger", + "env_logger 0.10.2", "frost-schnorrkel", "hex", "log", @@ -8216,14 +8235,11 @@ version = "0.1.0" dependencies = [ "blake2 0.11.0-rc.3", "borsh", - "k256", + "env_logger 0.11.8", "log", - "rand_core 0.6.4", - "schnorrkel", "serai-client-serai", "serai-cosign-types", "serai-db", - "serai-substrate-tests", "serai-task", "tokio", ] @@ -8343,7 +8359,7 @@ dependencies = [ name = "serai-ethereum-relayer" version = "0.1.0" dependencies = [ - "env_logger", + "env_logger 0.10.2", "log", "serai-db", "serai-env", @@ -8403,7 +8419,7 @@ dependencies = [ "borsh", "ciphersuite 0.4.2", "dalek-ff-group 0.5.0", - "env_logger", + "env_logger 0.10.2", "flexible-transcript", "hex", "log", @@ -8592,7 +8608,7 @@ dependencies = [ "borsh", "ciphersuite 0.4.2", "dkg-evrf", - "env_logger", + "env_logger 0.10.2", "hex", "log", "serai-cosign-types", diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index b3675299d..d4b353a43 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,7 +1,4 @@ -use std::{ - collections::HashMap, - time::{Duration, Instant}, -}; +use std::{collections::HashMap, time::Duration}; use borsh::{BorshDeserialize, BorshSerialize}; @@ -17,17 +14,15 @@ use serai_client_serai::abi::primitives::{ }; use crate::{ - BROADCAST_FREQUENCY, Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, - GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, - NetworksLatestCosignedBlock, SeraiRpc, SignedCosign, SubstrateBlockHash, - delay::{ACKNOWLEDGEMENT_DELAY, LatestCosignedBlockNumber}, + Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, + GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, + SubstrateBlockHash, + delay::LatestCosignedBlockNumber, evaluator::CurrentlyEvaluatedGlobalSession, - intend::GlobalSessionsChannel, + intend::IntendedCosigns, tests::{TestRequest, intend::Serai}, }; -use crate::intend::IntendedCosigns; - use serai_cosign_types::tests::{ fixture_public_key, public_key_from_seed, sign_cosign_with_fixture, sign_cosign_with_seed, }; diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 50467da1a..8599af4a4 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -11,7 +11,7 @@ use serai_task::ContinuallyRan; use serai_client_serai::{ Events, abi::{ - Block, Event, Header, HeaderV1, BLOCK_HEADER_BRANCH_TAG, BLOCK_HEADER_LEAF_TAG, coins, + Block, Event, Header, HeaderV1, BLOCK_BRANCH_TAG, BLOCK_LEAF_TAG, coins, primitives::{ BlockHash, address::{ExternalAddress, SeraiAddress}, @@ -132,7 +132,7 @@ impl Serai { let block = Block { header: Header::V1(HeaderV1 { number, - builds_upon: self.builds_upon.clone().calculate(BLOCK_HEADER_BRANCH_TAG), + builds_upon: self.builds_upon.clone().calculate(BLOCK_BRANCH_TAG), unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64, transactions_commitment: UnbalancedMerkleTree::EMPTY, @@ -145,8 +145,8 @@ impl Serai { let block_hash = block.header.hash(); self.builds_upon.append( - BLOCK_HEADER_BRANCH_TAG, - Blake2b256::new_with_prefix([BLOCK_HEADER_LEAF_TAG]) + BLOCK_BRANCH_TAG, + Blake2b256::new_with_prefix([BLOCK_LEAF_TAG]) .chain_update(block_hash.0) .finalize() .into(), From 4a1cf91fe33f7e1421353faaea4e8b916f8a3a1c Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:26:53 -0300 Subject: [PATCH 16/71] misc --- coordinator/cosign/types/src/tests/mod.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 8a34de432..9fac1ea9b 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -22,22 +22,18 @@ fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { } } -/// Returns the public key bytes from the test fixture keypair (seed [0xff; 32]) pub fn fixture_public_key() -> [u8; 32] { sr25519_fixture().public.to_bytes() } -/// Returns the public key bytes for a keypair with the given seed pub fn public_key_from_seed(seed: [u8; 32]) -> [u8; 32] { sr25519_fixture_from_seed(seed).public.to_bytes() } -/// Creates a SignedCosign using the test fixture keypair (seed [0xff; 32]) pub fn sign_cosign_with_fixture(cosign: Cosign) -> SignedCosign { sign_cosign(cosign, &sr25519_fixture()) } -/// Creates a SignedCosign using a keypair derived from the given seed pub fn sign_cosign_with_seed(cosign: Cosign, seed: [u8; 32]) -> SignedCosign { sign_cosign(cosign, &sr25519_fixture_from_seed(seed)) } From 52158abc16f502fca9386f7d2ff2316ce7cab3ee Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:27:52 -0300 Subject: [PATCH 17/71] Revert "misc" This reverts commit 4a1cf91fe33f7e1421353faaea4e8b916f8a3a1c. --- coordinator/cosign/types/src/tests/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 9fac1ea9b..8a34de432 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -22,18 +22,22 @@ fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { } } +/// Returns the public key bytes from the test fixture keypair (seed [0xff; 32]) pub fn fixture_public_key() -> [u8; 32] { sr25519_fixture().public.to_bytes() } +/// Returns the public key bytes for a keypair with the given seed pub fn public_key_from_seed(seed: [u8; 32]) -> [u8; 32] { sr25519_fixture_from_seed(seed).public.to_bytes() } +/// Creates a SignedCosign using the test fixture keypair (seed [0xff; 32]) pub fn sign_cosign_with_fixture(cosign: Cosign) -> SignedCosign { sign_cosign(cosign, &sr25519_fixture()) } +/// Creates a SignedCosign using a keypair derived from the given seed pub fn sign_cosign_with_seed(cosign: Cosign, seed: [u8; 32]) -> SignedCosign { sign_cosign(cosign, &sr25519_fixture_from_seed(seed)) } From 3fdfa445499880ac8742268472a31576bedda7ec Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:32:31 -0300 Subject: [PATCH 18/71] misc --- coordinator/cosign/src/delay.rs | 1 - coordinator/cosign/src/evaluator.rs | 3 --- coordinator/cosign/src/lib.rs | 6 ++---- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index c658eba9b..f07a45c14 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -46,7 +46,6 @@ impl ContinuallyRan for CosignDelayTask { // Peek the next block to mark as cosigned, without consuming yet let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { - // Queue was empty -> nothing to commit, txn gets dropped break; }; diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 4e1b14f8d..2ba3ac094 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -151,7 +151,6 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask = None; - for set in global_session_info.sets { // Check if this set cosigned this block or not let Some(cosign) = @@ -191,7 +189,6 @@ impl ContinuallyRan for CosignEvaluatorTask= block_number { weight_cosigned += global_session_info.stakes.get(&set.network).ok_or_else(|| { diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 88326b498..6933e1d19 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -231,8 +231,6 @@ impl IntakeCosignError { /// The interface to manage cosigning with. pub struct Cosigning { db: D, - // The task system stops a task once all its handles are dropped. Keep these alive for as long as - // this cosigning service should run. _task_handles: Vec, } impl Cosigning { @@ -353,7 +351,7 @@ impl Cosigning { // Check our indexed blockchain includes a block with this block number let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else { - return Err(IntakeCosignError::NotYetIndexedBlock)?; + Err(IntakeCosignError::NotYetIndexedBlock)? }; let faulty = cosign.block_hash != our_block_hash; @@ -369,7 +367,7 @@ impl Cosigning { } let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else { - return Err(IntakeCosignError::UnrecognizedGlobalSession)?; + Err(IntakeCosignError::UnrecognizedGlobalSession)? }; // Check the cosigned block number is in range to the global session From d25dbbce253c39bc8267f74d9e0809899a8f72ad Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 6 Jan 2026 15:36:12 -0300 Subject: [PATCH 19/71] misc2 --- coordinator/cosign/src/evaluator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 2ba3ac094..a7998ebfd 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -250,7 +250,6 @@ impl ContinuallyRan for CosignEvaluatorTask Date: Fri, 20 Feb 2026 18:10:12 -0300 Subject: [PATCH 20/71] feat: adding rpc harness --- coordinator/cosign/Cargo.toml | 5 +- coordinator/cosign/src/delay.rs | 43 +- coordinator/cosign/src/evaluator.rs | 279 ++-- coordinator/cosign/src/intend.rs | 305 +++-- coordinator/cosign/src/lib.rs | 83 +- coordinator/cosign/src/tests/cosigning.rs | 42 +- coordinator/cosign/src/tests/delay.rs | 10 +- coordinator/cosign/src/tests/evaluator.rs | 60 +- coordinator/cosign/src/tests/intend.rs | 1310 +++---------------- coordinator/cosign/src/tests/mod.rs | 39 +- substrate/abi/src/modules/validator_sets.rs | 2 - substrate/client/serai/src/lib.rs | 20 +- substrate/primitives/src/lib.rs | 2 +- 13 files changed, 675 insertions(+), 1525 deletions(-) diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 86d727738..2caf5ebc0 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -32,6 +32,9 @@ serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } +[features] +dev = [] + [dev-dependencies] -env_logger = { version = "0.11", default-features = false } +env_logger = { version = "0.10", default-features = false } serai-cosign-types = { path = "./types", features = ["test-helpers"] } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index f07a45c14..aeb4c299b 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -4,23 +4,25 @@ use std::time::{Duration, SystemTime}; use serai_db::*; use serai_task::{DoesNotError, ContinuallyRan}; -use crate::evaluator::CosignedBlocks; +use crate::{evaluator::CosignedBlocks, latest_cosigned_block_number}; +#[cfg(not(any(test, feature = "dev")))] /// How often callers should broadcast the cosigns flagged for rebroadcasting. -#[cfg(not(test))] pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60); -#[cfg(not(test))] -const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); +#[cfg(any(test, feature = "dev"))] /// How often callers should broadcast the cosigns flagged for rebroadcasting. -#[cfg(test)] pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(6); -#[cfg(test)] + +#[cfg(not(any(test, feature = "dev")))] +const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); +#[cfg(any(test, feature = "dev"))] const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(1); + pub(crate) const ACKNOWLEDGEMENT_DELAY: Duration = Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs()); pub(crate) fn now_timestamp() -> Duration { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO) + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).expect("error getting current timestamp") } create_db!( @@ -41,6 +43,7 @@ impl ContinuallyRan for CosignDelayTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; + loop { let mut txn = self.db.txn(); @@ -49,29 +52,35 @@ impl ContinuallyRan for CosignDelayTask { break; }; - // If we've already acknowledged a later block, consume and skip (don't wait). - let already_cosigned = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); - if block_number <= already_cosigned { - // Clear block from queue + let latest_cosigned_block_number = LatestCosignedBlockNumber::get(getter).unwrap_or(0); + + #[cfg(not(coverage))] + log::debug!( + "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_cosigned_block_number={latest_cosigned_block_number}", + ); + + if block_number <= latest_cosigned_block_number { + // If we've already acknowledged a later block, consume and skip (don't sleep). txn.commit(); continue; } // Calculate when we should mark it as valid - let time_valid = Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; - let now = now_timestamp(); + let now_timestamp = now_timestamp().as_secs(); + let time_valid_timestamp = time_evaluated + ACKNOWLEDGEMENT_DELAY.as_secs(); // drop txn during sleep drop(txn); - if time_valid > now { + if time_valid_timestamp > now_timestamp { // Sleep until then - let time_left = time_valid - now; - tokio::time::sleep(time_left).await; + let time_left = time_valid_timestamp - now_timestamp; + tokio::time::sleep(Duration::from_secs(time_left)).await; } let mut txn = self.db.txn(); - let _consumed_block = CosignedBlocks::try_recv(&mut txn); + // Consume block to continue + CosignedBlocks::try_recv(&mut txn); // Set the cosigned block LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index a7998ebfd..4f0c080a2 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -1,17 +1,19 @@ use core::future::Future; -use std::time::{Duration, Instant, SystemTime}; +use std::time::{Duration, Instant}; +use serai_client_serai::abi::primitives::network_id::ExternalNetworkId; use serai_db::*; use serai_task::ContinuallyRan; use crate::{ - HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns, - intend::{GlobalSessionsChannel, BlockEventData, BlockEvents}, + GlobalSession, HasEvents, NetworksLatestCosignedBlock, RequestNotableCosigns, + delay::now_timestamp, + intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, }; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "dev")))] pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60); -#[cfg(test)] +#[cfg(any(test, feature = "dev"))] pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(6); const COSIGN_COMMIT_THRESHOLD: u64 = 83; @@ -30,15 +32,15 @@ db_channel!( } ); -// This is a strict function which won't panic, even with a malicious Serai node, so long as: -// - It's called incrementally (with an increment of 1) -// - It's only called for block numbers we've completed indexing on within the intend task -// - It's only called for block numbers after a global session has started -// - The global sessions channel is populated as the block declaring the session is indexed -// Which all hold true within the context of this task and the intend task. -// -// This function will also ensure the currently evaluated global session is incremented once we -// finish evaluation of the prior session. +/// This is a strict function which won't panic, even with a malicious Serai node, so long as: +/// - It's called incrementally (with an increment of 1) +/// - It's only called for block numbers we've completed indexing on within the intend task +/// - It's only called for block numbers after a global session has started +/// - The global sessions channel is populated as the block declaring the session is indexed +/// Which all hold true within the context of this task and the intend task. +/// +/// This function will also ensure the currently evaluated global session is incremented once we +/// finish evaluation of the prior session. fn currently_evaluated_global_session_strict( txn: &mut impl DbTxn, block_number: u64, @@ -47,18 +49,16 @@ fn currently_evaluated_global_session_strict( let existing = match CurrentlyEvaluatedGlobalSession::get(txn) { Some(existing) => existing, None => { - let first = GlobalSessionsChannel::try_recv(txn).ok_or_else(|| { - format!( - "fetching global session for block #{block_number} but none declared in channel yet" - ) - })?; + let first = GlobalSessionsChannel::try_recv(txn) + .expect("fetching latest global session yet none declared"); CurrentlyEvaluatedGlobalSession::set(txn, &first); first } }; assert!( existing.1.start_block_number <= block_number, - "candidate's start block number exceeds our block number" + "candidate's start block number {:#?} exceeds our block number {block_number}", + existing.1.start_block_number ); existing }; @@ -93,11 +93,99 @@ fn should_request_cosigns(last_request_for_cosigns: &mut Instant) -> bool { true } -// Calculate the minimum threshold required for cosigning +//// Calculate the minimum threshold required for cosigning fn cosign_threshold(total_stake: u64) -> u64 { ((total_stake * COSIGN_COMMIT_THRESHOLD) / 100) + 1 } +/// Evaluate non-notable cosigns, returning (weight_cosigned, lowest_common_block). +fn evaluate_non_notable_cosigns( + getter: &impl Get, + block_number: u64, + global_session: [u8; 32], + global_session_info: &GlobalSession, +) -> Result<(u64, Option), String> { + /* + LatestCosign is populated with the latest cosigns for each network which don't + exceed the latest global session we've evaluated the start of. This current block + is during the latest global session we've evaluated the start of. + */ + + let mut weight_cosigned = 0; + let mut lowest_common_block: Option = None; + + for set in &global_session_info.sets { + // Check if this set cosigned this block or not + let Some(signed_cosign) = NetworksLatestCosignedBlock::get(getter, global_session, set.network) + else { + continue; + }; + + if signed_cosign.cosign.block_number >= block_number { + weight_cosigned += global_session_info + .stakes + .get(&set.network) + .ok_or_else(|| "ValidatorSet in global session yet didn't have its stake".to_owned())?; + } + + // Update the lowest block common to all of these cosigns + lowest_common_block = lowest_common_block + .map(|existing| existing.min(signed_cosign.cosign.block_number)) + .or(Some(signed_cosign.cosign.block_number)); + } + + Ok((weight_cosigned, lowest_common_block)) +} + +fn commit_cosigned_block( + mut txn: impl DbTxn, + block_number: u64, + label: &str, +) -> Result<(), String> { + CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); + txn.commit(); + + #[cfg(not(coverage))] + if (block_number % 500) == 0 { + log::debug!("marking {label} #{block_number} as cosigned"); + } + + Ok(()) +} + +/// If the cosign threshold isn't met, request cosigns and return an error. +async fn ensure_cosigned( + weight_cosigned: u64, + total_stake: u64, + block_number: u64, + global_session: [u8; 32], + last_request_for_cosigns: &mut Instant, + request: &(impl RequestNotableCosigns + Sync), + label: &str, +) -> Result<(), String> { + if weight_cosigned >= cosign_threshold(total_stake) { + return Ok(()); + } + + if should_request_cosigns(last_request_for_cosigns) { + request + .request_notable_cosigns(global_session) + .await + .map_err(|e| format!("RPC error fetching notable cosigns: {e:?}"))?; + } + + Err(format!("{label} block (#{block_number}) wasn't yet cosigned. this should resolve shortly")) +} + +fn latest_cosign_block_number( + getter: &impl Get, + global_session: [u8; 32], + network: ExternalNetworkId, +) -> Option { + NetworksLatestCosignedBlock::get(getter, global_session, network) + .map(|signed_cosign| signed_cosign.cosign.block_number) +} + /// A task to determine if a block has been cosigned and we should handle it. pub(crate) struct CosignEvaluatorTask { pub(crate) db: D, @@ -105,13 +193,14 @@ pub(crate) struct CosignEvaluatorTask { pub(crate) last_request_for_cosigns: Instant, } -impl ContinuallyRan for CosignEvaluatorTask { +impl ContinuallyRan for CosignEvaluatorTask { type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut known_cosign = None; let mut made_progress = false; + loop { let mut txn = self.db.txn(); let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) @@ -119,7 +208,36 @@ impl ContinuallyRan for CosignEvaluatorTask block_number < session.start_block_number, + // // No session declared yet — all queued blocks are pre-session + // None => true, + // }; + // if skip { + // debug_assert!( + // has_events == HasEvents::No, + // "pre-session block #{block_number} had events requiring cosigning" + // ); + // commit_cosigned_block(txn, block_number, "pre-session block")?; + // made_progress = true; + // continue; + // } + // } + + // Fetch the global session information. This must be called for ALL post-session blocks + // (including HasEvents::No) to maintain incrementality for session transitions. let (global_session, global_session_info) = currently_evaluated_global_session_strict(&mut txn, block_number)?; @@ -128,11 +246,10 @@ impl ContinuallyRan for CosignEvaluatorTask { let mut weight_cosigned = 0; + for set in global_session_info.sets { // Check if we have the cosign from this set - if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) - .map(|signed_cosign| signed_cosign.cosign.block_number) - == Some(block_number) + if latest_cosign_block_number(&txn, global_session, set.network) == Some(block_number) { // Since have this cosign, add the set's weight to the weight which has cosigned weight_cosigned += @@ -141,24 +258,17 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask = None; - for set in global_session_info.sets { - // Check if this set cosigned this block or not - let Some(cosign) = - NetworksLatestCosignedBlock::get(&txn, global_session, set.network) - else { - continue; - }; - if cosign.cosign.block_number >= block_number { - weight_cosigned += - global_session_info.stakes.get(&set.network).ok_or_else(|| { - "ValidatorSet in global session yet didn't have its stake".to_owned() - })?; - } - - // Update the lowest block common to all of these cosigns - lowest_common_block = lowest_common_block - .map(|existing| existing.min(cosign.cosign.block_number)) - .or(Some(cosign.cosign.block_number)); - } + let (weight_cosigned, lowest_common_block) = evaluate_non_notable_cosigns( + &txn, + block_number, + global_session, + &global_session_info, + )?; - // Check if the sum weight doesn't cross the required threshold - if weight_cosigned < cosign_threshold(global_session_info.total_stake) { - // Request the superseding notable cosigns over the network - // If this session hasn't yet produced notable cosigns, then we presume we'll see - // the desired non-notable cosigns as part of normal operations, without needing to - // explicitly request them - if should_request_cosigns(&mut self.last_request_for_cosigns) { - self - .request - .request_notable_cosigns(global_session) - .await - .map_err(|e| format!("{e:?}"))?; - } - - // We return an error so the delay before this task is run again increases - Err(format!( - "block (#{block_number}) wasn't yet cosigned. this should resolve shortly", - ))?; - } + ensure_cosigned( + weight_cosigned, + global_session_info.total_stake, + block_number, + global_session, + &mut self.last_request_for_cosigns, + &self.request, + "non-notable", + ) + .await?; // Update the cached result for the block we know is cosigned /* @@ -230,9 +310,6 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask Amount, Validators: (set: ExternalValidatorSet) -> Vec, LatestSet: (network: ExternalNetworkId) -> Set, + GenesisTime: () -> u64, } ); @@ -53,138 +56,187 @@ db_channel! { } } -// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this -// block. +/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this +/// block. fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> { - let mut sets = vec![]; - for network in ExternalNetworkId::all() { - let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else { - // If this network doesn't have usable keys, move on - continue; - }; - - sets.push((ExternalValidatorSet { network, session }, key, stake)); - } - sets + ExternalNetworkId::all() + .filter_map(|network| { + let Set { session, key, stake } = LatestSet::get(getter, network)?; + Some((ExternalValidatorSet { network, session }, key, stake)) + }) + .collect() } /// A task to determine which blocks we should intend to cosign. -pub(crate) struct CosignIntendTask { +pub(crate) struct CosignIntendTask { pub(crate) db: D, - pub(crate) serai: S, + pub(crate) serai: Arc, } -impl ContinuallyRan for CosignIntendTask { +impl ContinuallyRan for CosignIntendTask { type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { async move { - let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - let latest_block_number = self + let start_scan_block_number = ScanCosignFrom::get(&self.db).unwrap_or(0); + let latest_serai_block_number = self .serai .latest_finalized_block_number() .await + // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - if latest_block_number < start_block_number { + #[cfg(not(coverage))] + log::debug!( + "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" + ); + + if latest_serai_block_number < start_scan_block_number { + // made_progress = False + // Return, nothing new to progress with return Ok(false); } - for block_number in start_block_number..=latest_block_number { - let mut txn = self.db.txn(); + let mut made_progress = false; - let block = self + for block_number in start_scan_block_number ..= latest_serai_block_number { + let serai_block = self .serai .block_by_number(block_number) .await + // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching block #{block_number}: {e}"))? + // Ephemeral RPC Err: Block returned None even though serai reported as finalized + // task to re-run and continue trying .ok_or_else(|| "couldn't get block which should've been finalized".to_owned())?; - let events = self + let serai_block_hash = serai_block.header.hash(); + let serai_block_events = self .serai - .events(block.header.hash()) + .events(&serai_block_hash) .await + // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; - let mut has_events = HasEvents::No; + #[cfg(not(coverage))] + log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); + let mut txn = self.db.txn(); let mut builds_upon = BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new()); // Check we are indexing a linear chain - if block.header.builds_upon() - != builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_BRANCH_TAG) + if serai_block.header.builds_upon() != + builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_BRANCH_TAG) { - // Ephemeral error here, do not txn commit but reset progress + // Ephemeral RPC Err: + // serai.block_by_number(block_number) may return a different chain history (fork) + // but the prior indexed block was already finalized, so we MUST build upon it + // task to re-run and continue trying until on the finalized chain Err(format!( "node's block #{block_number} doesn't build upon the block #{} prior indexed", block_number - 1 ))?; } - let block_hash = block.header.hash(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + + SubstrateBlockHash::set(&mut txn, block_number, &serai_block_hash); builds_upon.append( serai_client_serai::abi::BLOCK_BRANCH_TAG, Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_LEAF_TAG]) - .chain_update(block_hash.0) + .chain_update(serai_block_hash.0) .finalize() .into(), ); BuildsUpon::set(&mut txn, &builds_upon); + let mut has_events = HasEvents::No; + let vset_events = serai_block_events.validator_sets(); + // Update the stakes - for tx_events in events.events() { - for event in tx_events { - match event { - Event::ValidatorSets(event) => match event { - validator_sets::Event::Allocation { validator, network, amount } => { - let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0)); - } - validator_sets::Event::Deallocation { validator, network, amount, timeline: _ } => { - let Ok(network) = ExternalNetworkId::try_from(*network) else { continue }; - let existing = Stakes::get(&txn, network, *validator) - .expect("unable to deallocate with no prior existing stake"); - Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); - } - validator_sets::Event::SetDecided { set, validators } => { - let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; - if validators.len() > 0 { - Validators::set( - &mut txn, - set, - &validators.iter().map(|(validator, _key_shares)| *validator).collect(), - ); - } - } - validator_sets::Event::SetKeys { set, key_pair } => { - has_events = HasEvents::Notable; - - let validators = Validators::take(&mut txn, *set) - .filter(|v| !v.is_empty()) - .ok_or_else(|| "set which wasn't decided set keys".to_string())?; - - let mut stake = 0; - for validator in validators { - stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0; - } - LatestSet::set( - &mut txn, - set.network, - &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, - ); - } - _ => continue, - }, - Event::Coins(event) => match event { - coins::Event::BurnWithInstruction { .. } => { - has_events = HasEvents::NonNotable; - } - _ => continue, - }, - _ => continue, - } + for event in vset_events.allocation_events() { + let Event::Allocation { validator, network, amount } = event else { + unreachable!("event from `allocation_events` wasn't `Event::Allocation`") + }; + let Ok(network) = ExternalNetworkId::try_from(*network) else { + // Not an ExternalNetworkId, possible Serai network allocation + // safe to skip this allocation event + continue; + }; + + let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0)); + let new_amount = Amount(existing.0 + amount.0); + Stakes::set(&mut txn, network, *validator, &new_amount); + } + for event in vset_events.deallocation_events() { + let Event::Deallocation { validator, network, amount, timeline: _ } = event else { + unreachable!("event from `deallocation_events` wasn't `Event::Deallocation`") + }; + let Ok(network) = ExternalNetworkId::try_from(*network) else { + // Not an ExternalNetworkId, possible Serai network allocation + // safe to skip this deallocation event + continue; + }; + + let existing = Stakes::get(&txn, network, *validator) + // critical panic: + // this is a critical issue and will not be solved after re-tries, + // missing Stakes from previous blocks will remain missing until re-indexed (if encountered) + // halt the process + .expect("unable to deallocate with no prior existing stake"); + + Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); + } + + // Handle decided sets + for event in vset_events.set_decided_events() { + let Event::SetDecided { set, validators } = event else { + unreachable!("event from `set_decided_events` wasn't `Event::SetDecided`") + }; + + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + + if validators.is_empty() { + // Maybe ephemeral: event blocks from RPC returned empty set list + // could resolve after retry. or will get forever stuck. + Err(format!("validator set from Event::SetDecided was empty"))?; + } + + Validators::set( + &mut txn, + set, + &validators.iter().map(|(validator, _key_shares)| *validator).collect(), + ); + } + + // Handle declarations of the latest set + for event in vset_events.set_keys_events() { + let Event::SetKeys { set, key_pair } = event else { + unreachable!("event from `set_keys_events` wasn't `Event::SetKeys`") + }; + has_events = HasEvents::Notable; + + let validators = Validators::take(&mut txn, *set) + // critical panic: + // this is a critical issue and will not be solved after re-tries, + // missing Validators from previous blocks will remain missing until re-indexed (if encountered) + // halt the process + .expect("set which wasn't decided set keys"); + + let stake: u64 = validators + .iter() + .map(|v| Stakes::get(&txn, set.network, *v).unwrap_or(Amount(0)).0) + .sum(); + LatestSet::set( + &mut txn, + set.network, + &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, + ); + } + + // Handle burn with instruction events (makes block non-notable if not already notable) + if has_events == HasEvents::No { + if serai_block_events.coins().burn_with_instruction_events().next().is_some() { + has_events = HasEvents::NonNotable; } } @@ -193,27 +245,57 @@ impl ContinuallyRan for CosignIntendTask { // If this is notable, it creates a new global session, which we index into the database // now if has_events == HasEvents::Notable { - let sets_and_keys_and_stakes = cosigning_sets(&txn); - let global_session = GlobalSession::id( - sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(), + let new_sets_and_keys_and_stakes = cosigning_sets(&txn); + let new_global_session = GlobalSession::id( + new_sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(), ); - let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len()); - let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len()); - let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len()); + let mut sets = Vec::with_capacity(new_sets_and_keys_and_stakes.len()); + let mut keys = HashMap::with_capacity(new_sets_and_keys_and_stakes.len()); + let mut stakes = HashMap::with_capacity(new_sets_and_keys_and_stakes.len()); let mut total_stake = 0; - for (set, key, stake) in sets_and_keys_and_stakes { + for (set, key, stake) in new_sets_and_keys_and_stakes { sets.push(set); keys.insert(set.network, key); stakes.insert(set.network, stake.0); total_stake += stake.0; } + + if GenesisTime::get(&txn).is_none() { + let time = serai_block.header.unix_time_in_millis(); + if time > 0 { + GenesisTime::set(&mut txn, &time); + } + } + if total_stake == 0 { - // Ephemeral error here, do not txn commit but reset progress - Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; + let genesis_time = GenesisTime::get(&txn) + // critical panic: + // this is a critical issue and will not be solved after re-tries, + // missing GenesisTime from previous blocks will remain missing until re-indexed (if encountered) + // halt the process + .expect("no genesis time for block #{block_number}"); + let time_elapsed_since_genesis = + serai_block.header.unix_time_in_millis().saturating_sub(genesis_time); + let genesis_period_end_timestamp = + genesis_time + u64::try_from(GENESIS_LIQUIDITY_PERIOD.as_millis()).unwrap(); + + if time_elapsed_since_genesis >= genesis_period_end_timestamp { + // critical panic: + // this is a critical issue and will not be solved after re-tries, + // missing Stakes from previous blocks will remain missing until re-indexed (if encountered) + // halt the process + panic!("cosigning sets for block #{block_number} had 0 stake in total, while stake is required"); + } + + // Genesis era not ended period: assign equal stake to each validator set + for set in &sets { + stakes.insert(set.network, 1); + } + total_stake = u64::try_from(sets.len()).unwrap(); } - let global_session_info = GlobalSession { + let next_global_session_info = GlobalSession { // This session starts cosigning after this block, as this block must be cosigned by // the existing validators start_block_number: block_number + 1, @@ -222,12 +304,12 @@ impl ContinuallyRan for CosignIntendTask { stakes, total_stake, }; - GlobalSessions::set(&mut txn, global_session, &global_session_info); + GlobalSessions::set(&mut txn, new_global_session, &next_global_session_info); if let Some(ending_global_session) = global_session_for_this_block { GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number); } - LatestGlobalSessionIntended::set(&mut txn, &global_session); - GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info)); + LatestGlobalSessionIntended::set(&mut txn, &new_global_session); + GlobalSessionsChannel::send(&mut txn, &(new_global_session, next_global_session_info)); } // If there isn't anyone available to cosign this block, meaning it'll never be cosigned, @@ -240,19 +322,19 @@ impl ContinuallyRan for CosignIntendTask { match has_events { HasEvents::Notable | HasEvents::NonNotable => { let global_session_for_this_block = global_session_for_this_block + // critical panic: basically unreachable given the condition above this match .expect("global session for this block was None but still attempting to cosign it"); - let global_session_info = - GlobalSessions::get(&txn, global_session_for_this_block).ok_or_else(|| { - format!( - "global session {:?} intended for block #{block_number} wasn't saved to the database", - global_session_for_this_block - ) - })?; + + // The GlobalSession that is ending + let ending_global_session_info = + GlobalSessions::get(&txn, global_session_for_this_block) + // critical panic: something that went wrong above + .expect("last global session intended wasn't saved to the database"); // Tell each set of their expectation to cosign this block - for set in global_session_info.sets { + for set in ending_global_session_info.sets { #[cfg(not(coverage))] - log::debug!("{set:?} will be cosigning block #{block_number}"); + log::debug!("set will cosign block: set={set:?}, block_number={block_number}"); IntendedCosigns::send( &mut txn, @@ -260,7 +342,7 @@ impl ContinuallyRan for CosignIntendTask { &CosignIntent { global_session: global_session_for_this_block, block_number, - block_hash, + block_hash: serai_block_hash, notable: has_events == HasEvents::Notable, }, ); @@ -269,17 +351,20 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } + #[cfg(not(coverage))] + log::debug!("finished iterating: has_events={has_events:?}"); + // Populate a singular feed with every block's status for the evaluator to work off of BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); // Mark this block as handled, meaning we should scan from the next block moving on ScanCosignFrom::set(&mut txn, &(block_number + 1)); - // All-or-nothing, commit only per block finished otherwise reset db progress - // for ephemeral errors + // Commit for every block that did progress, on failure restarts from the next block txn.commit(); + made_progress = true; } - Ok(true) + Ok(made_progress) } } } diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 6933e1d19..0de70ab54 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -16,15 +16,8 @@ use blake2::{Digest as _, Blake2s256}; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client_serai::{ - abi::{ - primitives::{ - BlockHash, crypto::Public, network_id::ExternalNetworkId, - validator_sets::ExternalValidatorSet, - }, - Block, - }, - Events, +use serai_client_serai::abi::primitives::{ + BlockHash, crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet, }; use serai_db::*; @@ -45,52 +38,13 @@ use delay::LatestCosignedBlockNumber; /// Test helpers and fixtures. pub mod tests; +#[cfg(not(any(test, feature = "dev")))] /// The interval at which the cosigning loop runs. -#[cfg(not(test))] pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); -/// The interval at which the cosigning loop runs (shortened for tests). -#[cfg(test)] +#[cfg(any(test, feature = "dev"))] +/// The interval at which the cosigning loop runs. pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_millis(10); -/// Abstraction over the Serai RPC client so tests can inject custom behaviour. -pub trait SeraiRpc: Clone + Send + Sync + 'static { - /// Return the latest finalized block number. - fn latest_finalized_block_number(&self) -> impl Send + Future>; - - /// Fetch a block by its number. - fn block_by_number( - &self, - block: u64, - ) -> impl Send + Future, String>>; - - /// Fetch all events associated with the provided block hash. - fn events(&self, block: BlockHash) -> impl Send + Future>; -} - -#[cfg(not(coverage))] -impl SeraiRpc for Arc { - fn latest_finalized_block_number(&self) -> impl Send + Future> { - let serai = self.clone(); - async move { serai.as_ref().latest_finalized_block_number().await.map_err(|e| format!("{e:?}")) } - } - - fn block_by_number( - &self, - block: u64, - ) -> impl Send + Future, String>> { - let serai = self.clone(); - async move { serai.as_ref().block_by_number(block).await.map_err(|e| format!("{e:?}")) } - } - - fn events(&self, block: BlockHash) -> impl Send + Future> { - let serai = self.clone(); - async move { - let events = serai.as_ref().events(block).await.map_err(|e| format!("{e:?}"))?; - Ok(events) - } - } -} - /// A 'global session', defined as all validator sets used for cosigning at a given moment. /// /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign @@ -118,7 +72,7 @@ pub(crate) struct GlobalSession { pub(crate) total_stake: u64, } impl GlobalSession { - fn id(mut cosigners: Vec) -> [u8; 32] { + pub(crate) fn id(mut cosigners: Vec) -> [u8; 32] { cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap()); Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into() } @@ -216,14 +170,14 @@ impl IntakeCosignError { /// If this error is temporal to the local view pub fn temporal(&self) -> bool { match self { - IntakeCosignError::NotYetIndexedBlock - | IntakeCosignError::StaleCosign - | IntakeCosignError::UnrecognizedGlobalSession - | IntakeCosignError::FutureGlobalSession => true, - IntakeCosignError::BeforeGlobalSessionStart - | IntakeCosignError::AfterGlobalSessionEnd - | IntakeCosignError::NonParticipatingNetwork - | IntakeCosignError::InvalidSignature => false, + IntakeCosignError::NotYetIndexedBlock | + IntakeCosignError::StaleCosign | + IntakeCosignError::UnrecognizedGlobalSession | + IntakeCosignError::FutureGlobalSession => true, + IntakeCosignError::BeforeGlobalSessionStart | + IntakeCosignError::AfterGlobalSessionEnd | + IntakeCosignError::NonParticipatingNetwork | + IntakeCosignError::InvalidSignature => false, } } } @@ -246,9 +200,9 @@ impl Cosigning { /// /// The database specified must only be used with a singular instance of the Serai network, and /// only used once at any given time. - pub fn spawn( + pub fn spawn( db: D, - serai: S, + serai: Arc, request: R, tasks_to_run_upon_cosigning: Vec, ) -> Self { @@ -288,12 +242,13 @@ impl Cosigning { getter: &impl Get, block_number: u64, ) -> Result, Faulted> { - if block_number > Self::latest_cosigned_block_number(getter)? { + if block_number == 0 || block_number > Self::latest_cosigned_block_number(getter)? { return Ok(None); } Ok(Some( - SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"), + SubstrateBlockHash::get(getter, block_number) + .expect(&format!("cosigned block {} but didn't index it", block_number)), )) } diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index d4b353a43..e4293df90 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use borsh::{BorshDeserialize, BorshSerialize}; @@ -6,27 +6,35 @@ use blake2::{Blake2s256, Digest}; use serai_db::{Db as _, DbTxn, MemDb}; -use serai_client_serai::abi::primitives::{ - BlockHash, - crypto::Public, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, +use serai_simulator_node::{SimulatorNode, SimulatorState}; + +use serai_client_serai::{ + Serai, + abi::primitives::{ + BlockHash, + crypto::Public, + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, + }, }; use crate::{ Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, - SubstrateBlockHash, - delay::LatestCosignedBlockNumber, - evaluator::CurrentlyEvaluatedGlobalSession, - intend::IntendedCosigns, - tests::{TestRequest, intend::Serai}, + SubstrateBlockHash, delay::LatestCosignedBlockNumber, evaluator::CurrentlyEvaluatedGlobalSession, + intend::IntendedCosigns, tests::TestRequest, }; use serai_cosign_types::tests::{ fixture_public_key, public_key_from_seed, sign_cosign_with_fixture, sign_cosign_with_seed, }; +async fn setup_mock_serai() -> (SimulatorNode, Arc) { + let node = SimulatorNode::start(SimulatorState::default()).await; + let serai = Arc::new(Serai::new(node.url()).unwrap()); + (node, serai) +} + const FIXTURE_SEED: [u8; 32] = [0xff; 32]; struct Sr25519Fixture { @@ -159,7 +167,7 @@ fn temporal_returns_false_for_non_temporal_errors() { #[tokio::test] async fn spawn_creates_cosigning_instance() { let db = MemDb::new(); - let serai = Serai::default(); + let (_node, serai) = setup_mock_serai().await; let (request, _calls) = TestRequest::new(false); let cosigning = Cosigning::spawn(db, serai, request, vec![]); @@ -171,7 +179,7 @@ async fn spawn_with_tasks_to_run_upon_cosigning() { use serai_task::Task; let db = MemDb::new(); - let serai = Serai::default(); + let (_node, serai) = setup_mock_serai().await; let (request, _calls) = TestRequest::new(false); let (_task, task_handle) = Task::new(); @@ -185,7 +193,7 @@ async fn spawn_with_tasks_to_run_upon_cosigning() { #[tokio::test] async fn spawn_initializes_cosigning_instance_correctly() { let db = MemDb::new(); - let serai = Serai::default(); + let (_node, serai) = setup_mock_serai().await; let (request, _calls) = TestRequest::new(false); let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); @@ -200,7 +208,7 @@ async fn spawn_initializes_cosigning_instance_correctly() { #[tokio::test] async fn spawn_tasks_chain_correctly() { let db = MemDb::new(); - let serai = Serai::default(); + let (_node, serai) = setup_mock_serai().await; let (request, _calls) = TestRequest::new(false); let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); @@ -271,7 +279,7 @@ fn cosigned_block_errors_when_faulted() { FaultedSession::set(&mut txn, &[1u8; 32]); txn.commit(); } - assert!(matches!(Cosigning::::cosigned_block(&db, 0), Err(Faulted))); + assert!(matches!(Cosigning::::cosigned_block(&db, 1), Err(Faulted))); } #[tokio::test] @@ -796,7 +804,7 @@ fn intake_cosign_accepts_cosign_at_global_session_last_block() { { let mut txn = db.txn(); GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - for i in 1..=5 { + for i in 1 ..= 5 { SubstrateBlockHash::set(&mut txn, i, &BlockHash([i as u8; 32])); } txn.commit(); diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index e5200607b..c7f236d80 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -4,7 +4,7 @@ use crate::{ LatestCosignedBlockNumber, delay::{CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, - tests::{IntoTask, Test, wait_until}, + tests::{IntoTask, TaskTest, wait_until}, }; use serai_db::{Db as _, DbTxn as _, MemDb}; @@ -51,7 +51,7 @@ async fn delay_task_returns_false_with_no_messages() { let test = DelayTest::default(); let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; assert_eq!(LatestCosignedBlockNumber::get(&test.db), None); assert_eq!(CosignedBlocks::peek(&test.db), None); @@ -73,7 +73,7 @@ async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { } let task = test.into_task(); - let _handle = Test::spawn_task_continually_running(task, vec![]); + let _handle = TaskTest::spawn_task_continually_running(task, vec![]); test.assert_task_iteration_completes_with(2).await; @@ -129,7 +129,7 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { let mut task = test.into_task(); // returns made_progress as true - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; // This is unlikely to actually happen in practice but it needs to be tested that it does what it is // meant to do, which is that if we've already acknowledged a later block, consume and skip @@ -146,6 +146,6 @@ async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { // No progress was made since the same block number was skipped, // made_progress returns false - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; test.assert_task_iteration_completes_with(4).await; } diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index e6cbddb31..945d84cd6 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -4,21 +4,24 @@ use std::{ time::{Duration, Instant}, }; -use serai_db::{DbTxn, Db as _, MemDb}; -use serai_task::ContinuallyRan; +use serai_cosign_types::SignedCosign; +use serai_db::{Db as _, DbTxn, MemDb}; use serai_client_serai::abi::primitives::{ + BlockHash, crypto::Public, + network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, }; +use serai_task::ContinuallyRan; + use crate::{ - BlockHash, Cosign, ExternalNetworkId, GlobalSession, HasEvents, NetworksLatestCosignedBlock, - SignedCosign, + Cosign, GlobalSession, HasEvents, NetworksLatestCosignedBlock, evaluator::{ CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, }, intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, - tests::{IntoTask, Test, TestRequest}, + tests::{IntoTask, TaskTest, TestRequest}, }; pub(crate) struct EvaluatorTest { @@ -48,7 +51,7 @@ impl EvaluatorTest { /// Asserts that cosigned blocks from start_block to end_block (inclusive) are present in order. fn assert_cosigned_blocks_range(&mut self, start_block: u64, end_block: u64) { let mut txn = self.db.txn(); - for expected_block in start_block..=end_block { + for expected_block in start_block ..= end_block { let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); assert_eq!(block_number, expected_block, "cosigned block mismatch"); @@ -120,7 +123,7 @@ impl EvaluatorTest { async fn evaluator_task_returns_false_with_no_block_events() { let test = EvaluatorTest::default(); let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; test.assert_evaluator_db_is_clear(); } @@ -138,7 +141,7 @@ async fn evaluator_task_processes_blocks_with_no_events() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completed(0, 2); } @@ -160,7 +163,7 @@ async fn evaluator_task_errors_on_notable_events_without_cosign() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; // When iteration fails, nothing is committed - block events are consumed but CosignedBlocks is empty test.assert_no_global_sessions_channel(); test.assert_has_block_events(); @@ -178,7 +181,7 @@ async fn evaluator_task_errors_on_notable_events_without_cosign() { let mut task: CosignEvaluatorTask = test.into_task().into(); task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; test.assert_no_global_sessions_channel(); test.assert_has_block_events(); } @@ -232,7 +235,7 @@ async fn evaluator_task_errors_on_notable_events_without_stakes() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; } #[tokio::test] @@ -253,7 +256,7 @@ async fn evaluator_task_errors_on_non_notable_events_without_cosign() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; // When iteration fails, nothing is committed test.assert_no_global_sessions_channel(); test.assert_has_block_events(); @@ -271,7 +274,7 @@ async fn evaluator_task_errors_on_non_notable_events_without_cosign() { let mut task: CosignEvaluatorTask = test.into_task().into(); task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; } #[tokio::test] @@ -295,7 +298,7 @@ async fn evaluator_task_errors_on_request_notable_cosigns_failure() { last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), }; - Test::assert_task_run_and_failed_with(&mut task, "RequestError").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "RequestError").await; assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); } @@ -320,7 +323,7 @@ async fn evaluator_task_errors_on_request_non_notable_cosigns_failure() { last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), }; - Test::assert_task_run_and_failed_with(&mut task, "RequestError").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "RequestError").await; assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); } @@ -345,7 +348,7 @@ async fn evaluator_task_processes_notable_events_when_cosigned() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; let (block_number, _time) = CosignedBlocks::peek(&test.db).expect("expected cosigned block"); assert_eq!(block_number, 1); @@ -380,7 +383,7 @@ async fn evaluator_task_non_notable_uses_cached_known_cosign() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; // All three blocks should be marked as cosigned test.assert_cosigned_blocks_range(1, 3); @@ -407,7 +410,7 @@ async fn evaluator_task_non_notable_with_cosign_returns_some() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_cosigned_blocks_range(1, 1); } @@ -433,7 +436,7 @@ async fn evaluator_task_non_notable_cosign_too_low_does_not_add_weight() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; } #[tokio::test] @@ -474,7 +477,7 @@ async fn evaluator_task_errors_on_non_notable_events_without_stakes() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; } #[tokio::test] @@ -534,13 +537,13 @@ async fn evaluator_task_non_notable_computes_lowest_common_block() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_cosigned_blocks_range(1, 3); } #[tokio::test] -#[should_panic(expected = "candidate's start block number exceeds our block number")] +#[should_panic(expected = "candidate's start block number ")] async fn evaluator_task_panics_when_session_starts_after_block() { let mut test = EvaluatorTest::default(); @@ -652,7 +655,7 @@ async fn evaluator_task_advances_global_session_at_start_block() { } let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_cosigned_blocks_range(1, 3); @@ -712,7 +715,7 @@ async fn evaluator_task_errors_on_weight_overflow_notable() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; } #[tokio::test] @@ -765,10 +768,11 @@ async fn evaluator_task_errors_on_weight_overflow_non_notable() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; + TaskTest::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; } #[tokio::test] +#[should_panic(expected = "fetching latest global session yet none declared")] async fn evaluator_task_errors_when_no_global_session_in_channel() { let mut test = EvaluatorTest::default(); @@ -779,9 +783,5 @@ async fn evaluator_task_errors_when_no_global_session_in_channel() { } let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "but none declared in channel yet").await; - - test.assert_no_currently_evaluated_global_session(); - test.assert_no_cosigned_blocks(); - test.assert_no_global_sessions_channel(); + let _ = task.run_iteration().await; } diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 8599af4a4..13c42662f 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -1,48 +1,33 @@ -use core::future::Future; -use std::{ - collections::{HashMap, HashSet}, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use rand_core::{OsRng, RngCore}; -use blake2::{Blake2b256, Digest}; use serai_db::{Db as _, DbTxn, MemDb}; -use serai_task::ContinuallyRan; + +use serai_simulator_node::{SimulatorNode, SimulatorState}; use serai_client_serai::{ - Events, + Serai, abi::{ - Block, Event, Header, HeaderV1, BLOCK_BRANCH_TAG, BLOCK_LEAF_TAG, coins, + Event, coins, primitives::{ BlockHash, address::{ExternalAddress, SeraiAddress}, - balance::{Amount, Balance, ExternalBalance}, - coin::{Coin, ExternalCoin}, + balance::{Amount, ExternalBalance}, + coin::ExternalCoin, crypto::{ExternalKey, KeyPair, Public}, instructions::{OutInstruction, OutInstructionWithBalance}, - merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, + merkle::IncrementalUnbalancedMerkleTree, network_id::{ExternalNetworkId, NetworkId}, validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, }, - signals, validator_sets, + validator_sets, }, }; -use crate::{ - intend::{ - BlockEventData, BlockEvents, BuildsUpon, CosignIntendTask, GlobalSessionsChannel, - IntendedCosigns, LatestSet, ScanCosignFrom, Set, Stakes, Validators, - }, - tests::{IntoTask, Test}, - CosignIntent, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, HasEvents, - LatestGlobalSessionIntended, SeraiRpc, SubstrateBlockHash, -}; +use crate::{intend::*, tests::*, *}; -fn set_keys_event(set: ExternalValidatorSet) -> Event { - Event::ValidatorSets(validator_sets::Event::SetKeys { - set, - key_pair: KeyPair(Public([0xff; 32]), ExternalKey(vec![0xff; 32].try_into().unwrap())), - }) -} +use super::SERAI_NODE_LOCK; fn set_decided_event(set: ValidatorSet, validators: Vec<(SeraiAddress, KeyShares)>) -> Event { Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) @@ -77,194 +62,32 @@ fn burn_with_instruction_event(from: SeraiAddress) -> Event { }) } -fn events_from_allocations(allocations: &[(SeraiAddress, ExternalNetworkId, u64)]) -> Vec { - allocations - .iter() - .map(|(validator, network, amount)| { - allocation_event(*validator, NetworkId::External(*network), *amount) - }) - .collect() -} - -#[derive(Clone)] -pub(crate) struct Serai { - pub(crate) latest_finalized_error: Option, - pub(crate) block_by_number_error: HashMap, - pub(crate) events_error: HashMap, - pub(crate) blocks_by_number: HashMap, - pub(crate) events_by_hash: HashMap, - pub(crate) builds_upon: IncrementalUnbalancedMerkleTree, - pub(crate) missing_blocks: HashSet, -} - -impl Default for Serai { - fn default() -> Self { - Self { - latest_finalized_error: None, - block_by_number_error: HashMap::new(), - events_error: HashMap::new(), - blocks_by_number: HashMap::new(), - events_by_hash: HashMap::new(), - builds_upon: IncrementalUnbalancedMerkleTree::new(), - missing_blocks: HashSet::new(), - } - } -} - -impl Serai { - pub(crate) fn set_latest_finalized_error(&mut self, error: &str) { - self.latest_finalized_error = Some(error.to_string()); - } - - pub(crate) fn set_block_not_found(&mut self, block_number: u64) { - self.missing_blocks.insert(block_number); - } - - pub(crate) fn set_block_error(&mut self, block_number: u64, error: &str) { - self.block_by_number_error.insert(block_number, error.to_string()); - } - - pub(crate) fn set_events_error(&mut self, block_hash: BlockHash, error: &str) { - self.events_error.insert(block_hash, error.to_string()); - } - - pub(crate) fn make_block(&mut self, number: u64) -> BlockHash { - let block = Block { - header: Header::V1(HeaderV1 { - number, - builds_upon: self.builds_upon.clone().calculate(BLOCK_BRANCH_TAG), - unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() - as u64, - transactions_commitment: UnbalancedMerkleTree::EMPTY, - events_commitment: UnbalancedMerkleTree::EMPTY, - consensus_commitment: [0; 32], - }), - transactions: vec![], - }; - - let block_hash = block.header.hash(); - - self.builds_upon.append( - BLOCK_BRANCH_TAG, - Blake2b256::new_with_prefix([BLOCK_LEAF_TAG]) - .chain_update(block_hash.0) - .finalize() - .into(), - ); - - self.blocks_by_number.insert(number, block); - - block_hash - } - - pub(crate) fn set_events(&mut self, block_hash: BlockHash, events: Vec) { - self.events_by_hash.insert(block_hash, Events::with(events)); - } -} - -impl SeraiRpc for Serai { - fn latest_finalized_block_number(&self) -> impl Send + Future> { - let err = self.latest_finalized_error.clone(); - let latest = self.blocks_by_number.keys().copied().max().unwrap_or(0); - async move { - if let Some(e) = err { - return Err(e); - } - Ok(latest) - } - } - - fn block_by_number( - &self, - block: u64, - ) -> impl Send + Future, String>> { - let err = self.block_by_number_error.get(&block).cloned(); - let block_entry = self.blocks_by_number.get(&block).cloned(); - let is_missing = self.missing_blocks.contains(&block); - - async move { - if let Some(e) = err { - return Err(e); - } - if is_missing { - return Ok(None); - } - Ok(block_entry) - } - } - - fn events(&self, block: BlockHash) -> impl Send + Future> { - let err = self.events_error.get(&block).cloned(); - let events = self.events_by_hash.get(&block).cloned().unwrap_or_default(); - async move { - if let Some(e) = err { - return Err(e); - } - Ok(events) - } - } -} - -pub(crate) struct IntendTest { - pub(crate) serai: Serai, +/// Generic test struct for intend tests. +/// Uses `FakeSerai` for mock tests and can be extended for live tests. +pub(crate) struct IntendTestStruct { + pub(crate) serai: Arc, pub(crate) db: MemDb, } -impl Default for IntendTest { - fn default() -> Self { - Self { serai: Serai::default(), db: MemDb::default() } - } -} - -impl IntoTask for IntendTest { - type Task = CosignIntendTask; +impl IntoTask for IntendTestStruct { + type Task = CosignIntendTask; fn into_task(&self) -> Self::Task { CosignIntendTask { db: self.db.clone(), serai: self.serai.clone() } } } -impl IntendTest { +impl IntendTestStruct { fn assert_substrate_block_hash_exists(&self, block_number: u64) -> BlockHash { let block_hash = SubstrateBlockHash::get(&self.db, block_number); assert!(block_hash.is_some(), "no substrate blockhash for block {block_number}"); block_hash.expect("no substrate blockhash") } - fn assert_no_global_sessions_channel(&self) { - assert_eq!(GlobalSessionsChannel::peek(&self.db).is_none(), true); - } - - fn assert_no_block_events(&self) { - assert_eq!(BlockEvents::peek(&self.db).is_none(), true); - } - - fn assert_no_substrate_block_hash(&self, block_number: u64) { - let block_hash = SubstrateBlockHash::get(&self.db, block_number); - assert!(block_hash.is_none(), "expected no substrate blockhash for block {block_number}"); - } - fn assert_builds_upon_is_expected(&self, expected: &IncrementalUnbalancedMerkleTree) { assert_eq!(BuildsUpon::get(&self.db).as_ref(), Some(expected)); } - fn assert_no_builds_upon(&self) { - assert_eq!(BuildsUpon::get(&self.db), None); - } - - // Assert everything that changed or should have changed after a simple task iteration run with linear blocks - // (substrate block hashes are set and builds upon is expected) - fn assert_task_iteration_per_block(&self, block_number: u64) -> BlockHash { - let block_hash = self.assert_substrate_block_hash_exists(block_number); - self.assert_builds_upon_is_expected(&self.serai.builds_upon); - block_hash - } - - fn assert_task_iteration_per_block_clears(&self, block_number: u64) { - self.assert_no_substrate_block_hash(block_number); - self.assert_no_builds_upon(); - } - fn assert_block_events_is_expected(&mut self, expected: BlockEventData) { let mut txn = self.db.txn(); let actual = BlockEvents::try_recv(&mut txn); @@ -282,12 +105,6 @@ impl IntendTest { assert_eq!(ScanCosignFrom::get(&self.db), Some(expected)); } - fn assert_no_scan_cosign_from(&self) { - assert_eq!(ScanCosignFrom::get(&self.db), None); - } - - // Assert everything that changed or should have changed after task iteration is ran per block - // (BlockEventData points to current block and events, ScanCosignFrom is the next block) fn assert_task_iteration_per_block_concluded( &mut self, block_number: u64, @@ -297,986 +114,247 @@ impl IntendTest { self.assert_scan_cosign_from_is_expected(block_number + 1); } - fn assert_task_iteration_per_block_with_no_events_ran(&mut self, block_number: u64) { - self.assert_task_iteration_per_block(block_number); + /// Assert that the task processed `block_number` correctly (no events). + fn assert_task_iteration_per_block_with_no_events_ran( + &mut self, + block_number: u64, + expected_builds_upon: &IncrementalUnbalancedMerkleTree, + ) { + self.assert_substrate_block_hash_exists(block_number); + self.assert_builds_upon_is_expected(expected_builds_upon); self.assert_task_iteration_per_block_concluded(block_number, HasEvents::No); } - fn assert_task_iterations_with_no_events_ran(&mut self, start_block: u64, end_block: u64) { - for block_number in start_block..=end_block { - self.assert_task_iteration_per_block(block_number); - self.assert_block_events_is_expected(BlockEventData { - block_number, - has_events: HasEvents::No, - }); - } - - self.assert_scan_cosign_from_is_expected(end_block + 1); - } - - /// Asserts that blocks were processed successfully up to (but not including) failed_block. - /// Takes the expected `builds_upon` value (state after the last successful block was processed). + /// Assert blocks were processed up to (but not including) `failed_block`. fn assert_task_iterations_with_no_events_failed_at( &mut self, failed_block: u64, expected_builds_upon: &IncrementalUnbalancedMerkleTree, ) { - let prev_builds_upon = self.serai.builds_upon.clone(); - - self.serai.builds_upon = expected_builds_upon.clone(); - self.assert_task_iteration_per_block_with_no_events_ran(failed_block - 1); - - self.serai.builds_upon = prev_builds_upon; - } - - fn assert_stakes_is_expected( - &self, - network: ExternalNetworkId, - validator: SeraiAddress, - expected: Option, - ) { - assert_eq!(Stakes::get(&self.db, network, validator), expected); - } - - fn assert_no_stakes(&self, network: ExternalNetworkId, validator: SeraiAddress) { - assert_eq!(Stakes::get(&self.db, network, validator), None); - } - - /// Asserts stakes match the accumulated totals from a slice of allocations. - /// Groups by (network, validator) and sums amounts before asserting. - fn assert_stakes_from_allocations_is_expected( - &self, - allocations: &[(SeraiAddress, ExternalNetworkId, u64)], - ) { - let mut expected: HashMap<(ExternalNetworkId, SeraiAddress), u64> = HashMap::new(); - for (validator, network, amount) in allocations { - *expected.entry((*network, *validator)).or_default() += amount; - } - for ((network, validator), amount) in expected { - self.assert_stakes_is_expected(network, validator, Some(Amount(amount))); - } - } - - fn assert_global_session(actual: &GlobalSession, expected: &GlobalSession) { - assert_eq!(actual.start_block_number, expected.start_block_number); - assert_eq!(actual.sets, expected.sets); - assert_eq!(actual.keys, expected.keys); - assert_eq!(actual.stakes, expected.stakes); - assert_eq!(actual.total_stake, expected.total_stake); - } - - fn assert_validators_is_expected( - &self, - set: ExternalValidatorSet, - expected: Option>, - ) { - assert_eq!(Validators::get(&self.db, set), expected); - } - - fn assert_no_validators(&self, set: ExternalValidatorSet) { - assert_eq!(Validators::get(&self.db, set), None); - } - - fn assert_latest_set_is_expected(&self, network: ExternalNetworkId, expected: Option<&Set>) { - let actual = LatestSet::get(&self.db, network); - match (actual.as_ref(), expected) { - (Some(a), Some(e)) => { - assert_eq!(a.session, e.session); - assert_eq!(a.key, e.key); - assert_eq!(a.stake, e.stake); - } - (None, None) => {} - _ => panic!("LatestSet mismatch for {:?}", network), - } - } - - fn assert_no_latest_set(&self, network: ExternalNetworkId) { - assert_eq!(LatestSet::get(&self.db, network).is_none(), true); - } - - fn assert_global_sessions_get(&self, session_id: [u8; 32], expected: Option<&GlobalSession>) { - match (GlobalSessions::get(&self.db, session_id), expected) { - (Some(ref actual), Some(exp)) => Self::assert_global_session(actual, exp), - (None, None) => {} - (actual, exp) => { - panic!("GlobalSessions mismatch: got {:?}, expected {:?}", actual.is_some(), exp.is_some()) - } - } - } - - fn assert_global_sessions_last_block(&self, session_id: [u8; 32], expected: u64) { - assert_eq!(GlobalSessionsLastBlock::get(&self.db, session_id), Some(expected)); - } - - fn assert_latest_global_session_intended(&self, expected: Option<[u8; 32]>) { - assert_eq!(LatestGlobalSessionIntended::get(&self.db), expected); - } - - fn assert_no_latest_global_session_intended(&self) { - assert_eq!(LatestGlobalSessionIntended::get(&self.db), None); - } - - fn assert_global_sessions_channel_peek(&self, expected: Option<&([u8; 32], GlobalSession)>) { - let actual = GlobalSessionsChannel::peek(&self.db); - match (actual.as_ref(), expected) { - (Some((aid, asess)), Some((eid, esess))) => { - assert_eq!(aid, eid); - Self::assert_global_session(asess, esess); - } - (None, None) => {} - _ => panic!( - "GlobalSessionsChannel mismatch: got {:?}, expected {:?}", - actual.is_some(), - expected.is_some() - ), - } - } - - fn assert_intended_cosigns_peek(&self, set: ExternalValidatorSet, expected: CosignIntent) { - assert_eq!(IntendedCosigns::peek(&self.db, set), Some(expected)); - } - - fn assert_no_intended_cosigns(&self, set: ExternalValidatorSet) { - assert_eq!(IntendedCosigns::peek(&self.db, set).is_none(), true); - } - - /// Asserts that all DB entries are cleared (return None or are empty). - /// This is useful for verifying initial state or that cleanup worked correctly. - fn assert_db_cleared( - &self, - block_numbers: &[u64], - networks: &[ExternalNetworkId], - sets: &[ExternalValidatorSet], - session_ids: &[[u8; 32]], - stakes: &[(ExternalNetworkId, SeraiAddress)], - ) { - self.assert_global_db_is_clear(); - - for &block_number in block_numbers { - self.assert_no_substrate_block_hash(block_number); - } - - for &network in networks { - self.assert_no_latest_set(network); - } - - for &set in sets { - self.assert_no_validators(set); - self.assert_no_intended_cosigns(set); - } - - for &session_id in session_ids { - self.assert_global_sessions_get(session_id, None); - self.assert_no_global_sessions_last_block(session_id); - } - - for &(network, validator) in stakes { - self.assert_no_stakes(network, validator); - } - } - - /// Asserts that all global (parameterless) DB entries are cleared. - /// Use this for a quick check when you don't need to verify parameterized entries. - fn assert_global_db_is_clear(&self) { - // create_db! { Cosign {... - self.assert_no_latest_global_session_intended(); - - // create_db!( CosignIntend {... - self.assert_no_scan_cosign_from(); - self.assert_no_builds_upon(); - - // db_channel! { CosignIntendChannels {... - self.assert_no_global_sessions_channel(); - self.assert_no_block_events(); - } - - fn assert_global_db_is_clear_after_block(&self, block_number: u64) { - self.assert_global_db_is_clear(); - - // create_db! { Cosign {... - self.assert_no_substrate_block_hash(block_number); - } - - fn assert_no_global_sessions_last_block(&self, session_id: [u8; 32]) { - assert_eq!(GlobalSessionsLastBlock::get(&self.db, session_id), None); - } - - fn assert_task_iteration_per_block_with_notable_events_ran( - &mut self, - block_number: u64, - previous_session_id: Option<[u8; 32]>, - ) -> ([u8; 32], GlobalSession) { - let block_hash = self.assert_task_iteration_per_block(block_number); - - // First notable block has no prior session to cosign it, so it's treated as No - // Subsequent notable blocks have a prior session, so they're treated as Notable - let expected_has_events = - if previous_session_id.is_some() { HasEvents::Notable } else { HasEvents::No }; - self.assert_block_events_is_expected(BlockEventData { - block_number, - has_events: expected_has_events, - }); - - let mut txn = self.db.txn(); - let channel_entry = GlobalSessionsChannel::try_recv(&mut txn); - txn.commit(); - - let (session_id, session) = channel_entry.unwrap_or_else(|| { - panic!("GlobalSessionsChannel was empty, expected session for block {block_number}") - }); - - let stored_session = GlobalSessions::get(&self.db, session_id) - .expect("GlobalSessions should contain the session after notable block"); - Self::assert_global_session(&session, &stored_session); - - assert_eq!( - session.start_block_number, - block_number + 1, - "session should start at block after the notable block" - ); - - assert!(session.total_stake > 0, "session should have non-zero total stake"); - - // GlobalSessionsLastBlock is set for the previous session when a new session starts - if let Some(prev_id) = previous_session_id { - self.assert_global_sessions_last_block(prev_id, block_number); - } - - // IntendedCosigns are sent for the previous session's sets - if let Some(prev_id) = previous_session_id { - let prev_session = - GlobalSessions::get(&self.db, prev_id).expect("previous session should exist"); - for set in prev_session.sets { - self.assert_intended_cosigns_peek( - set, - CosignIntent { global_session: prev_id, block_number, block_hash, notable: true }, - ); - } - } - - (session_id, session) - } - - fn assert_task_iteration_per_block_with_non_notable_events_ran(&mut self, block_number: u64) { - let block_hash = self.assert_task_iteration_per_block(block_number); - self.assert_task_iteration_per_block_concluded(block_number, HasEvents::NonNotable); - - let active_session_id = LatestGlobalSessionIntended::get(&self.db) - .expect("NonNotable block requires an active session from a prior notable block"); - - let session = - GlobalSessions::get(&self.db, active_session_id).expect("active session should exist"); - for set in session.sets { - self.assert_intended_cosigns_peek( - set, - CosignIntent { - global_session: active_session_id, - block_number, - block_hash, - notable: false, - }, - ); - } + self.assert_task_iteration_per_block_with_no_events_ran(failed_block - 1, expected_builds_upon); } } -#[tokio::test] -async fn intend_task_returns_false_with_no_blocks() { - let test = IntendTest::default(); - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - test.assert_global_db_is_clear(); +/// Create a [`SimulatorNode`] and an [`IntendTestStruct`] connected to it. +async fn setup_mock_test() -> (SimulatorNode, IntendTestStruct) { + let node = SimulatorNode::start(SimulatorState::default()).await; + let serai = Arc::new(Serai::new(node.url()).unwrap()); + (node, IntendTestStruct { serai, db: MemDb::new() }) } #[tokio::test] -async fn intend_task_returns_false_with_genesis_block() { - let mut test = IntendTest::default(); - - test.serai.make_block(0); +async fn iterates_serai_blocks() { + let _lock = SERAI_NODE_LOCK.lock().await; + serai_test_harness::serai_test(async |serai| { + let serai = Arc::new(serai); + let mut task = CosignIntendTask { db: MemDb::new(), serai: serai.clone() }; - let mut task = test.into_task(); + // First run processes all currently finalized blocks (and/or at least genesis), progress = true + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - // In intend.rs let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); - // will always default to the 1st block, and without a greater serai.latest_finalized_block_number() - // there will nothing to iterate, returning false as in "did not progress" - Test::assert_task_run_iteration_and_check_progress(&mut task, false).await; - test.assert_global_db_is_clear_after_block(0u64); -} + // The task has now consumed everything up to latest_finalized. Record that height. + let height_after_first_run = serai.latest_finalized_block_number().await.unwrap(); -#[tokio::test] -async fn intend_task_returns_true_with_linear_blocks() { - let mut test = IntendTest::default(); + // Second run: no new blocks beyond what was just processed, progress = false + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - test.serai.make_block(1); - test.serai.make_block(2); - test.serai.make_block(3); + // Wait for at least 3 new finalized blocks beyond the first run's height + let target = height_after_first_run + 3; + serai_test_harness::wait_for_blocks(&serai, target, Duration::from_secs(60)).await; - let mut task = test.into_task(); - - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iterations_with_no_events_ran(1, 3); -} - -#[tokio::test] -async fn intend_task_errors_if_chain_is_not_linear() { - let mut test = IntendTest::default(); - - test.serai.make_block(1); - - // Capture builds_upon after block 1 (before block 2 modifies it) - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block #2 does not build upon block #1 - test.serai.builds_upon = IncrementalUnbalancedMerkleTree::new(); - - test.serai.make_block(2); - - let mut task = test.into_task(); - - Test::assert_task_run_and_failed_with(&mut task, "doesn't build upon").await; - - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - // Now fix the chain: remove the broken block 2 and recreate it properly - test.serai.blocks_by_number.remove(&2); - test.serai.builds_upon = builds_upon_after_block_1; - - test.serai.make_block(2); - - let mut task = test.into_task(); - - // Re-run the task, block 2 properly builds upon block 1 - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - // block 1 was already asserted and cleared from queue, assert only block 2 now - test.assert_task_iteration_per_block_with_no_events_ran(2); -} - -#[tokio::test] -async fn intend_task_errors_if_block_not_found() { - let mut test = IntendTest::default(); - - test.serai.make_block(1); - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block 2 exists in terms of finalization, but returns None when fetched - test.serai.make_block(2); - test.serai.set_block_not_found(2); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with( - &mut task, - "couldn't get block which should've been finalized", - ) + // Third run: processes multiple new blocks, progress = true + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + }) .await; - - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - test.serai.missing_blocks.remove(&2); - - let mut task = test.into_task(); - - // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_per_block_with_no_events_ran(2); -} - -#[tokio::test] -async fn intend_task_handles_rpc_error_on_block_fetch() { - let mut test = IntendTest::default(); - - test.serai.make_block(1); - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block 2 exists in terms of finalization, but fetching it returns an error - test.serai.make_block(2); - test.serai.set_block_error(2, "connection refused"); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching block").await; - - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - test.serai.block_by_number_error.remove(&2); - - let mut task = test.into_task(); - - // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_per_block_with_no_events_ran(2); -} - -#[tokio::test] -async fn intend_task_handles_rpc_error_on_events_fetch() { - let mut test = IntendTest::default(); - - test.serai.make_block(1); - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = test.serai.builds_upon.clone(); - - // Block 2 exists in terms of finalization, but fetching it returns an event error - let block2_hash = test.serai.make_block(2); - test.serai.set_events_error(block2_hash, "timeout"); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching events").await; - - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - test.serai.events_error.remove(&block2_hash); - - let mut task = test.into_task(); - - // Re-run the task, block 2 now fetched and processed - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_per_block_with_no_events_ran(2); } -#[tokio::test] -async fn intend_task_handles_rpc_error_on_latest_finalized() { - let mut test = IntendTest::default(); +mod errors { + use super::*; - test.serai.make_block(1); - test.serai.set_latest_finalized_error("network error"); + #[tokio::test] + async fn errors_if_chain_is_not_linear() { + let (node, mut test) = setup_mock_test().await; - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "RPC error fetching latest finalized").await; + node.make_block(0, vec![]).await; + node.make_block(1, vec![]).await; - test.serai.latest_finalized_error = None; + let builds_upon_after_block_1 = node.builds_upon().await; + node.make_non_linear_block(2, vec![]).await; - let mut task = test.into_task(); - - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - test.assert_task_iteration_per_block_with_no_events_ran(1); -} - -#[tokio::test] -async fn intend_task_handles_allocation_events() { - let mut test = IntendTest::default(); + let mut task = test.into_task(); - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); + TaskTest::assert_task_run_and_failed_with(&mut task, "doesn't build upon").await; - let allocations_block1 = [ - (validator1, ExternalNetworkId::Bitcoin, 50), - (validator1, ExternalNetworkId::Bitcoin, 100), - (validator2, ExternalNetworkId::Bitcoin, 200), - (validator1, ExternalNetworkId::Ethereum, 150), - ]; - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, events_from_allocations(&allocations_block1)); - - let allocations_block2 = - [(validator2, ExternalNetworkId::Ethereum, 75), (validator1, ExternalNetworkId::Bitcoin, 25)]; - - let block2_hash = test.serai.make_block(2); - test.serai.set_events(block2_hash, events_from_allocations(&allocations_block2)); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - let all_allocations: Vec<_> = - allocations_block1.iter().chain(allocations_block2.iter()).copied().collect(); - - test.assert_stakes_from_allocations_is_expected(&all_allocations); - - // Both blocks have only allocation events which are a HasEvents::No - // not HasEvents::Notable neither HasEvents::NonNotable - test.assert_task_iterations_with_no_events_ran(1, 2); -} - -#[tokio::test] -#[should_panic(expected = "no prior existing stake")] -async fn intend_task_handles_deallocation_without_prior_allocation() { - let mut test = IntendTest::default(); - - let validator = SeraiAddress([0x01; 32]); - - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - // Deallocate without any prior allocation should panic - vec![deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100)], - ); - - let mut task = test.into_task(); - task.run_iteration().await.unwrap(); -} - -#[tokio::test] -async fn intend_task_handles_deallocation_event() { - let mut test = IntendTest::default(); - - let validator = SeraiAddress([0x01; 32]); - - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - deallocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 30), - ], - ); - - // Create task after all blocks are set up - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator, Some(Amount(70))); - test.assert_task_iteration_per_block_with_no_events_ran(1); -} - -#[tokio::test] -async fn intend_task_handles_set_decided_event_with_empty_validators() { - let mut test = IntendTest::default(); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![set_decided_event(vset0, vec![])]); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - // Verify that an empty validators vec results in no validators being stored - test.assert_validators_is_expected(set0, None); - - // SetDecided is a HasEvents::No type - test.assert_task_iteration_per_block_with_no_events_ran(1); -} - -#[tokio::test] -async fn intend_task_handles_set_decided_event() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - let validator3 = SeraiAddress([0x03; 32]); - - let set0_btc = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0_btc = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set0_eth = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let vset0_eth = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; - - // Block 1: SetDecided for Bitcoin - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![set_decided_event( - vset0_btc, - vec![ - (validator1, KeyShares::ONE), - (validator2, KeyShares::try_from(2).unwrap()), - (validator3, KeyShares::try_from(3).unwrap()), - ], - )], - ); - - // Block 2: SetDecided for Ethereum with different validators - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![set_decided_event( - vset0_eth, - vec![ - (validator1, KeyShares::try_from(2).unwrap()), - (validator2, KeyShares::try_from(3).unwrap()), - ], - )], - ); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - // Verify validators are stored for each set - test.assert_validators_is_expected(set0_btc, Some(vec![validator1, validator2, validator3])); - test.assert_validators_is_expected(set0_eth, Some(vec![validator1, validator2])); - - // SetDecided is a HasEvents::No type, not HasEvents::Notable neither HasEvents::NonNotable - test.assert_task_iterations_with_no_events_ran(1, 2); -} - -#[tokio::test] -async fn intend_task_handles_set_keys_without_set_decided() { - let mut test = IntendTest::default(); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - // Block 1: SetKeys without prior SetDecided should error - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![set_keys_event(set0)]); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "set which wasn't decided set keys").await; - - // No state should be recorded since the operation failed - test.assert_global_db_is_clear_after_block(1); -} - -#[tokio::test] -async fn intend_task_handles_set_keys_event() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; - - // Block 1: First SetKeys (creates session 0) - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - allocation_event(validator2, NetworkId::External(ExternalNetworkId::Bitcoin), 200), - set_decided_event( - vset0, - vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], - ), - set_keys_event(set0), - ], - ); - - // Block 2: Second SetKeys (creates session 1) - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![ - set_decided_event( - vset1, - vec![(validator1, KeyShares::try_from(2).unwrap()), (validator2, KeyShares::ONE)], - ), - set_keys_event(set1), - ], - ); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - let expected_set = Set { session: Session(1), key: Public([0xff; 32]), stake: Amount(300) }; - test.assert_latest_set_is_expected(ExternalNetworkId::Bitcoin, Some(&expected_set)); + // Consume block 0's channel entry before checking block 1 + test.assert_block_events_is_expected(BlockEventData { + block_number: 0, + has_events: HasEvents::No, + }); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - test.assert_validators_is_expected(set0, None); - test.assert_validators_is_expected(set1, None); + // Now fix the chain: remove the broken block 2 and recreate it properly + node.remove_block(2).await; + node.make_block(2, vec![]).await; - // Block 1: First notable block (no prior session) -> HasEvents::No - let (session0_id, _) = test.assert_task_iteration_per_block_with_notable_events_ran(1, None); + let mut task = test.into_task(); - // Block 2: Second notable block (prior session exists) -> HasEvents::Notable - test.assert_task_iteration_per_block_with_notable_events_ran(2, Some(session0_id)); + // Re-run the task, block 2 properly builds upon block 1 + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + // block 1 was already asserted and cleared from queue, assert only block 2 now + let builds_upon_after_block_2 = node.builds_upon().await; + test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon_after_block_2); + } - test.assert_scan_cosign_from_is_expected(3); -} + #[tokio::test] + async fn errors_if_block_not_found() { + let (node, mut test) = setup_mock_test().await; -#[tokio::test] -async fn intend_task_handles_set_keys_event_error_if_notable_block_has_no_stake() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let set1 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(1) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(1) }; - - // Block 1: Normal notable block with allocations - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator1, NetworkId::External(ExternalNetworkId::Bitcoin), 100), - set_decided_event(vset0, vec![(validator1, KeyShares::ONE)]), - set_keys_event(set0), - ], - ); - - // Block 2: SetDecided and SetKeys for new session with validator2 who has no allocations -> 0 stake - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![set_decided_event(vset1, vec![(validator2, KeyShares::ONE)]), set_keys_event(set1)], - ); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "had 0 stake").await; -} + node.make_block(0, vec![]).await; + node.make_block(1, vec![]).await; -#[tokio::test] -async fn intend_task_handles_burn_with_instruction_events() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - - // Block 1: Create a session (first notable block, treated as No because no prior session) - let allocations_block1 = - [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; - let block1_hash = test.serai.make_block(1); - let mut events = events_from_allocations(&allocations_block1); - events.push(set_decided_event( - vset0, - vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], - )); - events.push(set_keys_event(set0)); - test.serai.set_events(block1_hash, events); - - // Block 2: Burn event makes block NonNotable (with additional allocations) - let allocations_block2 = [(validator1, ExternalNetworkId::Bitcoin, 50)]; - let block2_hash = test.serai.make_block(2); - let mut events2 = events_from_allocations(&allocations_block2); - events2.push(burn_with_instruction_event(validator1)); - test.serai.set_events(block2_hash, events2); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator1, Some(Amount(150))); - test.assert_stakes_is_expected(ExternalNetworkId::Bitcoin, validator2, Some(Amount(200))); - - // Block 1: First notable block (no prior session, treated as No) - test.assert_task_iteration_per_block(1); - test - .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); - - // Block 2: NonNotable (has burn event, session exists from block 1) - test.assert_task_iteration_per_block_with_non_notable_events_ran(2); -} + // Capture builds_upon after block 1 + let builds_upon_after_block_1 = node.builds_upon().await; -#[tokio::test] -async fn intend_task_handles_ignore_non_validator_sets_events() { - let mut test = IntendTest::default(); - - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; - let vset1 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Ethereum), session: Session(0) }; - - // Block 1: Signals event (outer _ => continue) and AcceptedHandover (inner _ => continue) - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - Event::Signals(signals::Event::NetworkHalted { network: ExternalNetworkId::Bitcoin }), - Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset0 }), - ], - ); - - // Block 2: More ignored events on different network - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![ - Event::Signals(signals::Event::NetworkHalted { network: ExternalNetworkId::Ethereum }), - Event::ValidatorSets(validator_sets::Event::AcceptedHandover { set: vset1 }), - ], - ); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - // Both blocks have only ignored events -> HasEvents::No - test.assert_task_iterations_with_no_events_ran(1, 2); -} + // Block 2 exists in terms of finalization, but returns None when fetched + node.make_block(2, vec![]).await; + node.set_block_missing(2).await; -#[tokio::test] -async fn intend_task_handles_ignore_non_burn_with_instruction_coins_events() { - let mut test = IntendTest::default(); - - let validator = SeraiAddress([0x01; 32]); - - // Block 1: Mint and Transfer events (should be ignored) - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - Event::Coins(coins::Event::Mint { - to: validator, - coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(100) }, - }), - Event::Coins(coins::Event::Transfer { - from: validator, - to: SeraiAddress([0x02; 32]), - coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(50) }, - }), - ], - ); - - // Block 2: Burn event (not BurnWithInstruction, should be ignored) - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![Event::Coins(coins::Event::Burn { - from: validator, - coins: Balance { coin: Coin::External(ExternalCoin::Bitcoin), amount: Amount(50) }, - })], - ); - - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; - - // All Coins events except BurnWithInstruction are ignored -> HasEvents::No - test.assert_task_iterations_with_no_events_ran(1, 2); -} + let mut task = test.into_task(); + TaskTest::assert_task_run_and_failed_with( + &mut task, + "couldn't get block which should've been finalized", + ) + .await; -#[tokio::test] -async fn intend_task_handles_ignores_serai_network_events() { - let mut test = IntendTest::default(); + test.assert_block_events_is_expected(BlockEventData { + block_number: 0, + has_events: HasEvents::No, + }); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - let validator = SeraiAddress([0x01; 32]); + node.clear_block_missing(2).await; - let vset_serai = ValidatorSet { network: NetworkId::Serai, session: Session(0) }; + let mut task = test.into_task(); - let block1_hash = test.serai.make_block(1); - test.serai.set_events( - block1_hash, - vec![ - allocation_event(validator, NetworkId::Serai, 100), - // Can even try a greater deallocation amount, both will be ignored anyway - deallocation_event(validator, NetworkId::Serai, 150), - ], - ); + // Re-run the task, block 2 now fetched and processed + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + let builds_upon = node.builds_upon().await; + test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); + } - let block2_hash = test.serai.make_block(2); - test.serai.set_events( - block2_hash, - vec![set_decided_event(vset_serai, vec![(validator, KeyShares::ONE)])], - ); + #[tokio::test] + async fn handles_rpc_error_on_block_fetch() { + let (node, mut test) = setup_mock_test().await; - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + node.make_block(0, vec![]).await; + node.make_block(1, vec![]).await; - test.assert_task_iterations_with_no_events_ran(1, 2); -} + // Capture builds_upon after block 1 + let builds_upon_after_block_1 = node.builds_upon().await; -#[tokio::test] -async fn intend_task_handles_downgrades_events_when_no_session_available() { - let mut test = IntendTest::default(); + // Block 2 exists in terms of finalization, but fetching it returns an error + node.make_block(2, vec![]).await; + node.set_block_number_error("blockchain/block", 2, "connection refused").await; - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); + let mut task = test.into_task(); + TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching block").await; - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + test.assert_block_events_is_expected(BlockEventData { + block_number: 0, + has_events: HasEvents::No, + }); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator1)]); + node.clear_block_number_error("blockchain/block", 2).await; - let allocations_block2 = - [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; - let block2_hash = test.serai.make_block(2); - let mut events = events_from_allocations(&allocations_block2); - events.push(set_decided_event( - vset0, - vec![(validator1, KeyShares::ONE), (validator2, KeyShares::try_from(2).unwrap())], - )); - events.push(set_keys_event(set0)); - test.serai.set_events(block2_hash, events); + let mut task = test.into_task(); - let block3_hash = test.serai.make_block(3); - test.serai.set_events(block3_hash, vec![burn_with_instruction_event(validator2)]); + // Re-run the task, block 2 now fetched and processed + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + let builds_upon = node.builds_upon().await; + test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); + } - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + #[tokio::test] + async fn handles_rpc_error_on_events_fetch() { + let (node, mut test) = setup_mock_test().await; - test.assert_task_iteration_per_block(1); - test - .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + node.make_block(0, vec![]).await; + node.make_block(1, vec![]).await; - test.assert_task_iteration_per_block(2); - test - .assert_block_events_is_expected(BlockEventData { block_number: 2, has_events: HasEvents::No }); + // Capture builds_upon after block 1 + let builds_upon_after_block_1 = node.builds_upon().await; - test.assert_task_iteration_per_block_with_non_notable_events_ran(3); + // Block 2 exists in terms of finalization, but fetching its events returns an error + let block2_hash = node.make_block(2, vec![]).await; + node.set_block_hash_error("blockchain/events", block2_hash, "timeout").await; - test.assert_scan_cosign_from_is_expected(4); -} + let mut task = test.into_task(); + TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching events").await; -#[tokio::test] -async fn intend_task_handles_errors_when_global_session_not_in_database() { - use serai_db::Db as _; + test.assert_block_events_is_expected(BlockEventData { + block_number: 0, + has_events: HasEvents::No, + }); + test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - let mut test = IntendTest::default(); + node.clear_block_hash_error("blockchain/events", block2_hash).await; - let validator = SeraiAddress([0x01; 32]); + let mut task = test.into_task(); - let fake_session_id = [0xAB; 32]; - { - let mut txn = test.db.txn(); - LatestGlobalSessionIntended::set(&mut txn, &fake_session_id); - txn.commit(); + // Re-run the task, block 2 events now fetched and processed + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + let builds_upon = node.builds_upon().await; + test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); } - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator)]); - - let mut task = test.into_task(); - Test::assert_task_run_and_failed_with(&mut task, "wasn't saved to the database").await; + #[tokio::test] + async fn errors_if_set_decided_has_empty_validators() { + let (node, test) = setup_mock_test().await; - test.assert_no_substrate_block_hash(1); - test.assert_no_scan_cosign_from(); - test.assert_no_block_events(); + // Block 0: no events + node.make_block(0, vec![]).await; - test.assert_latest_global_session_intended(Some(fake_session_id)); -} - -#[tokio::test] -async fn intend_task_handles_safeguard_prevents_cosigning_with_no_session() { - let mut test = IntendTest::default(); - - let validator1 = SeraiAddress([0x01; 32]); - let validator2 = SeraiAddress([0x02; 32]); - - let block1_hash = test.serai.make_block(1); - test.serai.set_events(block1_hash, vec![burn_with_instruction_event(validator1)]); + // Block 1: SetDecided with an external network but an empty validator list + let empty_set_decided = set_decided_event( + ValidatorSet { + network: NetworkId::External(ExternalNetworkId::Bitcoin), + session: Session(0), + }, + vec![], + ); + node.make_block(1, vec![vec![empty_set_decided]]).await; - let set0 = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let vset0 = - ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), session: Session(0) }; + let mut task = test.into_task(); + TaskTest::assert_task_run_and_failed_with( + &mut task, + "validator set from Event::SetDecided was empty", + ) + .await; + } - let allocations = - [(validator1, ExternalNetworkId::Bitcoin, 100), (validator2, ExternalNetworkId::Bitcoin, 200)]; - let block2_hash = test.serai.make_block(2); - let mut events = events_from_allocations(&allocations); - events.push(set_decided_event(vset0, vec![(validator1, KeyShares::ONE)])); - events.push(set_keys_event(set0)); - test.serai.set_events(block2_hash, events); + #[tokio::test] + async fn handles_rpc_error_on_latest_finalized() { + let (node, mut test) = setup_mock_test().await; - let mut task = test.into_task(); - Test::assert_task_run_iteration_and_check_progress(&mut task, true).await; + node.make_block(0, vec![]).await; + node.make_block(1, vec![]).await; + node.set_error("blockchain/latest_finalized_block_number", "network error").await; - test.assert_task_iteration_per_block(1); - test - .assert_block_events_is_expected(BlockEventData { block_number: 1, has_events: HasEvents::No }); + let mut task = test.into_task(); + TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching latest finalized") + .await; - test.assert_task_iteration_per_block(2); - test - .assert_block_events_is_expected(BlockEventData { block_number: 2, has_events: HasEvents::No }); + node.clear_error("blockchain/latest_finalized_block_number").await; - assert!( - LatestGlobalSessionIntended::get(&test.db).is_some(), - "session should have been created by block 2" - ); + let mut task = test.into_task(); - test.assert_scan_cosign_from_is_expected(3); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + test.assert_block_events_is_expected(BlockEventData { + block_number: 0, + has_events: HasEvents::No, + }); + let builds_upon = node.builds_upon().await; + test.assert_task_iteration_per_block_with_no_events_ran(1, &builds_upon); + } } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index da257d3cd..6cbfdd327 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -17,7 +17,10 @@ use std::{ }, }; -use serai_task::{ContinuallyRan, Task, TaskHandle}; +pub(crate) use serai_test_task::{IntoTask, TaskTest}; + +pub(crate) static SERAI_NODE_LOCK: std::sync::LazyLock> = + std::sync::LazyLock::new(|| tokio::sync::Mutex::new(())); use crate::RequestNotableCosigns; @@ -103,40 +106,6 @@ macro_rules! wait_until { } pub(crate) use wait_until; -pub(crate) struct Test; -impl Test { - pub(crate) async fn assert_task_run_iteration_and_check_progress( - task: &mut impl ContinuallyRan, - made_progress: bool, - ) { - assert_eq!(task.run_iteration().await.unwrap(), made_progress); - } - - pub(crate) async fn assert_task_run_and_failed_with(task: &mut impl ContinuallyRan, error: &str) { - let err = task.run_iteration().await.unwrap_err(); - let err_str = format!("{err:?}"); - assert!(err_str.contains(error), "{err_str}"); - } - - /// Spawns a task to run continuously in the background, returning its handle. - /// - /// This allows testing a task while it runs as expected (with the full `continually_run` - /// loop including delays and error handling). Drop the returned `TaskHandle` to stop the task. - pub fn spawn_task_continually_running( - task_runner: T, - dependents: Vec, - ) -> TaskHandle { - let (task, task_handle) = Task::new(); - tokio::spawn(task_runner.continually_run(task, dependents)); - task_handle - } -} - -pub(crate) trait IntoTask { - type Task: ContinuallyRan + 'static; - fn into_task(&self) -> Self::Task; -} - #[derive(Clone)] pub(crate) struct TestRequest { pub(crate) calls: Arc, diff --git a/substrate/abi/src/modules/validator_sets.rs b/substrate/abi/src/modules/validator_sets.rs index c2501e130..21e6acd17 100644 --- a/substrate/abi/src/modules/validator_sets.rs +++ b/substrate/abi/src/modules/validator_sets.rs @@ -12,8 +12,6 @@ use serai_primitives::{ validator_sets::*, }; -pub use serai_primitives::validator_sets::DeallocationTimeline; - /// Slash(es) to occur on-chain. #[derive(Clone, PartialEq, Eq, Debug)] pub enum Slashes { diff --git a/substrate/client/serai/src/lib.rs b/substrate/client/serai/src/lib.rs index 667282c48..f507554cc 100644 --- a/substrate/client/serai/src/lib.rs +++ b/substrate/client/serai/src/lib.rs @@ -171,13 +171,13 @@ impl Serai { self .call( "blockchain/publish_transaction", - &format!(r#"{{ "transaction": {} }}"#, hex::encode(borsh::to_vec(transaction).unwrap())), + &format!(r#"{{ "transaction": "{}" }}"#, hex::encode(borsh::to_vec(transaction).unwrap())), ) .await } /// Fetch the events of a specific block. - pub async fn events(&self, block: BlockHash) -> Result { + pub async fn events(&self, block: &BlockHash) -> Result { Ok(Events { events: Arc::new( self @@ -228,23 +228,7 @@ impl Serai { } } -impl Default for Events { - fn default() -> Self { - Events { events: Arc::new(vec![vec![]]) } - } -} - impl Events { - /// Create an instance of Events - pub fn new() -> Self { - Events::default() - } - - /// Create an instance of Events - pub fn with(events: Vec) -> Self { - Events { events: Arc::new(vec![events]) } - } - /// The events within this container. /// /// This will yield the events for each transaction within the block, including the implicit diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 12b56fe17..72b36d2d0 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -75,7 +75,7 @@ impl From for BlockNumber { level so this is fine for our use-case. If we do ever see a 64-byte block hash, we can simply hash it into a 32-byte hash or truncate it. */ -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)] pub struct BlockHash(pub [u8; 32]); #[cfg(feature = "scale")] crate::borsh_as_scale!(BlockHash); From eb7eb2d98d558762545288dda415cef90f8a6350 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 23 Feb 2026 17:04:53 -0300 Subject: [PATCH 21/71] feat(cosign): skip 0 stake vsets --- Cargo.lock | 510 ++++++++++++++++--------------- audits/crypto/dkg/evrf/README.md | 2 +- common/log/Cargo.toml | 21 ++ common/log/LICENSE | 21 ++ common/log/src/lib.rs | 64 ++++ coordinator/cosign/src/intend.rs | 99 +++--- 6 files changed, 403 insertions(+), 314 deletions(-) create mode 100644 common/log/Cargo.toml create mode 100644 common/log/LICENSE create mode 100644 common/log/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2b95efe92..c367be33d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -103,9 +103,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86debde32d8dbb0ab29e7cc75ae1a98688ac7a4c9da54b3a9b14593b9b3c46d3" +checksum = "b0c0dc44157867da82c469c13186015b86abef209bf0e41625e4b68bac61d728" dependencies = [ "alloy-eips", "alloy-primitives", @@ -130,9 +130,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6cb2e7efd385b333f5a77b71baaa2605f7e22f1d583f2879543b54cbce777c" +checksum = "ba4cdb42df3871cd6b346d6a938ec2ba69a9a0f49d1f82714bc5c48349268434" dependencies = [ "alloy-consensus", "alloy-eips", @@ -144,9 +144,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfbc46fa201350bf859add798d818bbe68b84882a8af832e4433791d28a975d" +checksum = "23e8604b0c092fabc80d075ede181c9b9e596249c70b99253082d7e689836529" dependencies = [ "alloy-primitives", ] @@ -195,9 +195,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be47bf1b91674a5f394b9ed3c691d764fb58ba43937f1371550ff4bc8e59c295" +checksum = "b9f7ef09f21bd1e9cb8a686f168cb4a206646804567f0889eadb8dcc4c9288c8" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a59f6f520c323111650d319451de1edb1e32760029a468105b9d7b0f7c11bdf2" +checksum = "7c9cf3b99f46615fbf7dc1add0c96553abb7bf88fc9ec70dfbe7ad0b47ba7fe8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -245,9 +245,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8708475665cc00e081c085886e68eada2f64cfa08fc668213a9231655093d4de" +checksum = "e9dbe713da0c737d9e5e387b0ba790eb98b14dd207fe53eef50e19a5a8ec3dac" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -257,9 +257,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a24c81a56d684f525cd1c012619815ad3a1dd13b0238f069356795d84647d3c" +checksum = "ff42cd777eea61f370c0b10f2648a1c81e0b783066cd7269228aa993afd487f7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -272,9 +272,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786c5b3ad530eaf43cda450f973fe7fb1c127b4c8990adf66709dafca25e3f6f" +checksum = "8cbca04f9b410fdc51aaaf88433cbac761213905a65fe832058bcf6690585762" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -298,9 +298,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ed40adf21ae4be786ef5eb62db9c692f6a30f86d34452ca3f849d6390ce319" +checksum = "42d6d15e069a8b11f56bef2eccbad2a873c6dd4d4c81d04dda29710f5ea52f04" dependencies = [ "alloy-consensus", "alloy-eips", @@ -311,9 +311,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f15cffc63c421d33d77f6d30d7d18ef090a44ecf32205f5bfa12d4e565360a" +checksum = "091dc8117d84de3a9ac7ec97f2c4d83987e24d485b478d26aa1ec455d7d52f7d" dependencies = [ "alloy-genesis", "alloy-hardforks", @@ -333,9 +333,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b88cf92ed20685979ed1d8472422f0c6c2d010cec77caf63aaa7669cc1a7bc2" +checksum = "de3b431b4e72cd8bd0ec7a50b4be18e73dab74de0dba180eef171055e5d5926e" dependencies = [ "alloy-rlp", "bytes", @@ -360,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ca4c15818be7ac86208aff3a91b951d14c24e1426e66624e75f2215ba5e2cc" +checksum = "d181c8cc7cf4805d7e589bf4074d56d55064fa1a979f005a45a62b047616d870" dependencies = [ "alloy-chains", "alloy-consensus", @@ -415,14 +415,14 @@ checksum = "ce8849c74c9ca0f5a03da1c865e3eb6f768df816e67dd3721a398a8a7e398011" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "alloy-rpc-client" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abe0addad5b8197e851062b49dc47157444bced173b601d91e3f9b561a060a50" +checksum = "f2792758a93ae32a32e9047c843d536e1448044f78422d71bf7d7c05149e103f" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -440,9 +440,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e98aabb013a71a4b67b52825f7b503e5bb6057fb3b7b2290d514b0b0574b57" +checksum = "dd720b63f82b457610f2eaaf1f32edf44efffe03ae25d537632e7d23e7929e1a" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -451,9 +451,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a1dd760b6a798ee045ab6a7bbd1a02ad8bd6a64d8e18d6e41732f4fc4a4fe5c" +checksum = "e1b21e1ad18ff1b31ff1030e046462ab8168cf8894e6778cd805c8bdfe2bd649" dependencies = [ "alloy-primitives", "derive_more 2.0.1", @@ -463,9 +463,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5899af8417dcf89f40f88fa3bdb2f3f172605d8e167234311ee34811bbfdb0bf" +checksum = "9b2dc411f13092f237d2bf6918caf80977fc2f51485f9b90cb2a2f956912c8c9" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -484,9 +484,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410a80e9ac786a2d885adfd7da3568e8f392da106cb5432f00eb4787689d281a" +checksum = "1ad79f1e27e161943b5a4f99fe5534ef0849876214be411e0032c12f38e94daa" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -498,9 +498,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb73325ee881e42972a5a7bc85250f6af89f92c6ad1222285f74384a203abeb" +checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +509,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bea4c8f30eddb11d7ab56e83e49c814655daa78ca708df26c300c10d0189cbc" +checksum = "2425c6f314522c78e8198979c8cbf6769362be4da381d4152ea8eefce383535d" dependencies = [ "alloy-primitives", "async-trait", @@ -524,9 +524,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28bd71507db58477151a6fe6988fa62a4b778df0f166c3e3e1ef11d059fe5fa" +checksum = "c3ecb71ee53d8d9c3fa7bac17542c8116ebc7a9726c91b1bf333ec3d04f5a789" dependencies = [ "alloy-consensus", "alloy-network", @@ -551,23 +551,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fa1ca7e617c634d2bd9fa71f9ec8e47c07106e248b9fcbd3eaddc13cabd625" +checksum = "ab81bab693da9bb79f7a95b64b394718259fdd7e41dceeced4cad57cb71c4f6a" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27c00c0c3a75150a9dc7c8c679ca21853a137888b4e1c5569f92d7e2b15b5102" +checksum = "489f1620bb7e2483fb5819ed01ab6edc1d2f93939dce35a5695085a1afd1d699" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -577,15 +577,15 @@ dependencies = [ "proc-macro2", "quote", "sha3 0.10.8", - "syn 2.0.114", + "syn 2.0.117", "syn-solidity", ] [[package]] name = "alloy-sol-macro-input" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297db260eb4d67c105f68d6ba11b8874eec681caec5505eab8fbebee97f790bc" +checksum = "56cef806ad22d4392c5fc83cf8f2089f988eb99c7067b4e0c6f1971fc1cca318" dependencies = [ "const-hex", "dunce", @@ -593,15 +593,15 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b91b13181d3bcd23680fd29d7bc861d1f33fbe90fdd0af67162434aeba902d" +checksum = "a6df77fea9d6a2a75c0ef8d2acbdfd92286cc599983d3175ccdc170d3433d249" dependencies = [ "serde", "winnow", @@ -609,9 +609,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc442cc2a75207b708d481314098a0f8b6f7b58e3148dd8d8cc7407b0d6f9385" +checksum = "64612d29379782a5dde6f4b6570d9c756d734d760c0c94c254d361e678a6591f" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -621,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b321f506bd67a434aae8e8a7dfe5373bf66137c149a5f09c9e7dfb0ca43d7c91" +checksum = "fa186e560d523d196580c48bf00f1bf62e63041f28ecf276acc22f8b27bb9f53" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -661,14 +661,14 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.6.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a91d6b4c2f6574fdbcb1611e460455c326667cf5b805c6bd1640dad8e8ee4d2" +checksum = "6fa0c53e8c1e1ef4d01066b01c737fb62fc9397ab52c6e7bb5669f97d281b9bc" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -732,9 +732,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.101" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "approx" @@ -830,7 +830,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -843,7 +843,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -882,7 +882,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -940,7 +940,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure 0.13.2", ] @@ -952,7 +952,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1004,7 +1004,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1015,7 +1015,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1067,7 +1067,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1137,7 +1137,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1146,7 +1146,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1253,9 +1253,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "bitvec" @@ -1284,7 +1284,7 @@ version = "0.11.0-rc.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52965399b470437fc7f4d4b51134668dbc96573fea6f1b83318a420e4605745" dependencies = [ - "digest 0.11.0-rc.11", + "digest 0.11.0", ] [[package]] @@ -1381,7 +1381,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1411,9 +1411,9 @@ version = "0.1.1" [[package]] name = "bumpalo" -version = "3.19.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" dependencies = [ "allocator-api2", ] @@ -1461,9 +1461,9 @@ version = "2.99.99" [[package]] name = "cc" -version = "1.2.55" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -1554,7 +1554,7 @@ dependencies = [ name = "ciphersuite" version = "0.4.2" dependencies = [ - "digest 0.11.0-rc.11", + "digest 0.11.0", "ff", "ff-group-tests", "group", @@ -1600,9 +1600,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.57" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6899ea499e3fb9305a65d5ebf6e3d2248c5fab291f300ad0a704fbe142eae31a" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -1610,9 +1610,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.57" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b12c8b680195a62a8364d16b8447b01b6c2c8f9aaf68bee653be34d4245e238" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -1630,14 +1630,20 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.7" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" + +[[package]] +name = "cmov" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "de0758edba32d61d1fd9f4d69491b47604b91ee2f7e6b33de7e54ca4ebe55dc3" [[package]] name = "cobs" @@ -2050,6 +2056,15 @@ dependencies = [ "cipher", ] +[[package]] +name = "ctutils" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1005a6d4446f5120ef475ad3d2af2b30c49c2c9c6904258e3bb30219bebed5e4" +dependencies = [ + "cmov", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -2077,7 +2092,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2135,7 +2150,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2146,7 +2161,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2186,7 +2201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2215,9 +2230,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "2163a0e204a148662b6b6816d4b5d5668a5f2f8df498ccbd5cd0e864e78fecba" dependencies = [ "powerfmt", "serde_core", @@ -2231,7 +2246,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2242,7 +2257,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2271,7 +2286,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2282,7 +2297,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "unicode-xid", ] @@ -2300,13 +2315,13 @@ dependencies = [ [[package]] name = "digest" -version = "0.11.0-rc.11" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b42f1d9edf5207c137646b568a0168ca0ec25b7f9eaf7f9961da51a3d91cea" +checksum = "f8bf3682cdec91817be507e4aa104314898b95b84d74f3d43882210101a545b6" dependencies = [ "block-buffer 0.11.0", "crypto-common 0.2.0", - "subtle", + "ctutils", "zeroize", ] @@ -2359,7 +2374,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2540,7 +2555,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2618,7 +2633,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2638,16 +2653,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "env_filter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" -dependencies = [ - "log", + "syn 2.0.117", ] [[package]] @@ -2660,16 +2666,6 @@ dependencies = [ "log", ] -[[package]] -name = "env_logger" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" -dependencies = [ - "env_filter", - "log", -] - [[package]] name = "environmental" version = "1.1.4" @@ -2753,7 +2749,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2860,7 +2856,7 @@ name = "flexible-transcript" version = "0.3.4" dependencies = [ "blake2 0.11.0-rc.5", - "digest 0.11.0-rc.11", + "digest 0.11.0", "merlin", "sha2 0.11.0-rc.5", "zeroize", @@ -2997,7 +2993,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3009,7 +3005,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3019,7 +3015,7 @@ source = "git+https://github.com/serai-dex/patch-polkadot-sdk#55a9d2cf03623408ee dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3090,9 +3086,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -3115,9 +3111,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -3125,27 +3121,26 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", "futures-util", - "num_cpus", ] [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" @@ -3162,13 +3157,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3184,15 +3179,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -3202,9 +3197,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -3214,7 +3209,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -3889,7 +3883,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3997,9 +3991,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "93f0862381daaec758576dcc22eb7bbf4d7efd67328553f3b45a412a51a3fb21" dependencies = [ "once_cell", "wasm-bindgen", @@ -4052,7 +4046,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4109,9 +4103,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -4179,9 +4173,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.180" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libm" @@ -4530,7 +4524,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4625,7 +4619,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "libc", ] @@ -4750,7 +4744,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4762,7 +4756,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4776,7 +4770,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4787,7 +4781,7 @@ checksum = "31e7b9b365f39f573850b21c1e241234e29426ee8b0d6ee13637f714fad7390f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4798,7 +4792,7 @@ checksum = "54256681b01f4e5b038a619b24896f8c76d61995075909226d4e6bcf60bad525" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4847,9 +4841,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" dependencies = [ "libc", ] @@ -4872,7 +4866,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", - "keccak 0.1.5", + "keccak 0.1.6", "rand_core 0.6.4", "zeroize", ] @@ -5543,7 +5537,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5743,7 +5737,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5838,7 +5832,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5946,7 +5940,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6050,7 +6044,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6061,7 +6055,7 @@ checksum = "75eea531cfcd120e0851a3f8aed42c4841f78c889eefafd96339c72677ae42c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6107,7 +6101,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6118,7 +6112,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.10.0", + "bitflags 2.11.0", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -6154,7 +6148,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.114", + "syn 2.0.117", "tempfile", ] @@ -6168,7 +6162,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6200,7 +6194,7 @@ checksum = "2cf194f5b1a415ef3a44ee35056f4009092cc4038a9f7e3c7c1e392f48ee7dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6423,7 +6417,7 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -6471,7 +6465,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -6502,7 +6496,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6724,7 +6718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "311720d4f0f239b041375e7ddafdbd20032a33b7bae718562ea188e188ed9fd3" dependencies = [ "alloy-eip7928", - "bitflags 2.10.0", + "bitflags 2.11.0", "revm-bytecode", "revm-primitives", ] @@ -6897,7 +6891,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys", @@ -7752,7 +7746,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7840,7 +7834,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7869,7 +7863,7 @@ version = "0.5.2" dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group 0.5.0", - "digest 0.11.0-rc.11", + "digest 0.11.0", "flexible-transcript", "hex", "multiexp", @@ -7970,11 +7964,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.5.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -7983,9 +7977,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -8130,7 +8124,7 @@ dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group 0.5.0", "dkg-musig", - "env_logger 0.10.2", + "env_logger", "frost-schnorrkel", "hex", "log", @@ -8249,7 +8243,6 @@ version = "0.1.0" dependencies = [ "blake2 0.11.0-rc.5", "borsh", - "env_logger 0.11.8", "log", "serai-client-serai", "serai-cosign-types", @@ -8324,11 +8317,16 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", + "pallet-timestamp", "parity-scale-codec", + "rand_core 0.6.4", "serai-abi", "serai-coins-pallet", "serai-core-pallet", "serai-dex-pallet", + "serai-validator-sets-pallet", + "sp-core", + "sp-io", ] [[package]] @@ -8380,7 +8378,7 @@ dependencies = [ name = "serai-ethereum-relayer" version = "0.1.0" dependencies = [ - "env_logger 0.10.2", + "env_logger", "log", "serai-db", "serai-env", @@ -8453,7 +8451,7 @@ dependencies = [ "borsh", "ciphersuite 0.4.2", "dalek-ff-group 0.5.0", - "env_logger 0.10.2", + "env_logger", "flexible-transcript", "hex", "log", @@ -8642,7 +8640,7 @@ dependencies = [ "borsh", "ciphersuite 0.4.2", "dkg-evrf", - "env_logger 0.10.2", + "env_logger", "hex", "log", "serai-cosign-types", @@ -8735,7 +8733,7 @@ dependencies = [ "serai-processor-ethereum-deployer", "serai-processor-ethereum-erc20", "serai-processor-ethereum-primitives", - "syn 2.0.114", + "syn 2.0.117", "syn-solidity", "tokio", ] @@ -8991,14 +8989,22 @@ dependencies = [ name = "serai-validator-sets-pallet" version = "0.1.0" dependencies = [ + "bitvec", "borsh", + "ciphersuite 0.4.2", + "dalek-ff-group 0.5.0", + "embedwards25519", + "frame-benchmarking", "frame-support", "frame-system", "pallet-babe", "pallet-grandpa", "pallet-session", + "pallet-timestamp", "parity-scale-codec", + "rand_chacha 0.3.1", "rand_core 0.6.4", + "secq256k1", "serai-abi", "serai-coins-pallet", "serai-core-pallet", @@ -9048,7 +9054,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9072,7 +9078,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9123,7 +9129,7 @@ dependencies = [ "darling 0.20.99", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9166,7 +9172,7 @@ checksum = "7c5f3b1e2dc8aad28310d8410bd4d7e180eca65fca176c52ab00d364475d0024" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.11.0-rc.11", + "digest 0.11.0", ] [[package]] @@ -9176,7 +9182,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", - "keccak 0.1.5", + "keccak 0.1.6", ] [[package]] @@ -9185,7 +9191,7 @@ version = "0.11.0-rc.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5bfe7820113e633d8886e839aae78c1184b8d7011000db6bc7eb61e34f28350" dependencies = [ - "digest 0.11.0-rc.11", + "digest 0.11.0", "keccak 0.2.0-rc.1", ] @@ -9392,7 +9398,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9572,7 +9578,7 @@ source = "git+https://github.com/serai-dex/patch-polkadot-sdk#55a9d2cf03623408ee dependencies = [ "quote", "sp-crypto-hashing", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9592,7 +9598,7 @@ source = "git+https://github.com/serai-dex/patch-polkadot-sdk#55a9d2cf03623408ee dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9735,7 +9741,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9876,7 +9882,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10006,7 +10012,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10018,7 +10024,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10061,9 +10067,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -10072,14 +10078,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.5.4" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2379beea9476b89d0237078be761cf8e012d92d5ae4ae0c9a329f974838870fc" +checksum = "53f425ae0b12e2f5ae65542e00898d500d4d318b4baf09f40fd0d410454e9947" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10108,7 +10114,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10117,7 +10123,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -10146,15 +10152,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dd07eb858a2067e2f3c7155d54e929265c264e6f37efe3ee7a8d1b5a1dd0ba" +checksum = "adb6935a6f5c20170eeceb1a3835a49e12e19d792f6dd344ccc76a985ca5a6ca" [[package]] name = "tempfile" -version = "3.24.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", "getrandom 0.3.4", @@ -10221,7 +10227,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10313,7 +10319,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10363,9 +10369,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap 2.13.0", "serde_core", @@ -10399,9 +10405,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] @@ -10447,7 +10453,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "bytes", "http", "http-body", @@ -10489,7 +10495,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -10644,9 +10650,9 @@ checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-joining-type" @@ -10797,9 +10803,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "1de241cdc66a9d91bd84f097039eb140cdc6eec47e0cdbaf9d932a1dd6c35866" dependencies = [ "cfg-if", "once_cell", @@ -10810,9 +10816,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "e12fdf6649048f2e3de6d7d5ff3ced779cdedee0e0baffd7dff5cdfa3abc8a52" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10820,22 +10826,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "0e63d1795c565ac3462334c1e396fd46dbf481c40f51f5072c310717bc4fb309" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "e9f9cdac23a5ce71f6bf9f8824898a501e511892791ea2a0c6b8568c68b9cb53" dependencies = [ "unicode-ident", ] @@ -10856,7 +10862,7 @@ version = "0.243.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6d8db401b0528ec316dfbe579e6ab4152d61739cfe076706d2009127970159d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "hashbrown 0.15.5", "indexmap 2.13.0", "semver", @@ -10883,7 +10889,7 @@ dependencies = [ "addr2line", "anyhow", "async-trait", - "bitflags 2.10.0", + "bitflags 2.11.0", "bumpalo", "cc", "cfg-if", @@ -10955,7 +10961,7 @@ dependencies = [ "serde", "serde_derive", "sha2 0.10.9", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", "wasmtime-environ", "windows-sys 0.61.2", "zstd", @@ -11061,7 +11067,7 @@ checksum = "63ba3124cc2cbcd362672f9f077303ccc4cd61daa908f73447b7fdaece75ff9f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -11080,9 +11086,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "f2c7c5718134e770ee62af3b6b4a84518ec10101aad610c024b64d6ff29bb1ff" dependencies = [ "js-sys", "wasm-bindgen", @@ -11203,7 +11209,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -11214,7 +11220,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -11615,7 +11621,7 @@ checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -11635,14 +11641,14 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.19" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff05f8caa9038894637571ae6b9e29466c1f4f829d26c9b28f869a29cbe3445" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" diff --git a/audits/crypto/dkg/evrf/README.md b/audits/crypto/dkg/evrf/README.md index 601be6b42..acbf434f6 100644 --- a/audits/crypto/dkg/evrf/README.md +++ b/audits/crypto/dkg/evrf/README.md @@ -16,7 +16,7 @@ confirm: - The secret shares sent can be received by the intended recipient so long as they can access the bulletin board -Additionally, Serai desired a robust scheme (albeit with an biased key as the +Additionally, Serai desired a robust scheme (albeit with a biased key as the output, which is fine for our purposes). Accordingly, our implementation instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal for verifiable encryption, with the caller allowed to decide the set of diff --git a/common/log/Cargo.toml b/common/log/Cargo.toml new file mode 100644 index 000000000..136b0ddc2 --- /dev/null +++ b/common/log/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "serai-log" +version = "0.1.0" +description = "Coverage-gated logging macros for the Serai project" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/common/log" +authors = ["Luke Parker ", "rafael_xmr "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.64" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +log = { version = "0.4", default-features = false, features = ["std"] } diff --git a/common/log/LICENSE b/common/log/LICENSE new file mode 100644 index 000000000..5b505480a --- /dev/null +++ b/common/log/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024-2026 Serai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/common/log/src/lib.rs b/common/log/src/lib.rs new file mode 100644 index 000000000..fa91fbb73 --- /dev/null +++ b/common/log/src/lib.rs @@ -0,0 +1,64 @@ +#![cfg_attr(docsrs, feature(doc_cfg))] + +/// Re-export of `log` for direct access (e.g. `serai_log::log::Level`). +pub use log; + +/// Coverage-gated `trace!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! trace { + ($($arg:tt)+) => { $crate::log::trace!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! trace { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `debug!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! debug { + ($($arg:tt)+) => { $crate::log::debug!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! debug { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `info!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! info { + ($($arg:tt)+) => { $crate::log::info!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! info { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `warn!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! warn { + ($($arg:tt)+) => { $crate::log::warn!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! warn { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `error!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! error { + ($($arg:tt)+) => { $crate::log::error!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! error { + ($($arg:tt)+) => {}; +} diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index e57c3c9e2..050bc5936 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -1,10 +1,9 @@ use core::future::Future; -use std::{collections::HashMap, sync::Arc}; +use std::{sync::Arc, collections::HashMap}; use blake2::{Digest as _, Blake2b256}; use serai_client_serai::{ - Serai, abi::{ primitives::{ network_id::ExternalNetworkId, @@ -13,10 +12,10 @@ use serai_client_serai::{ validator_sets::{Session, ExternalValidatorSet}, address::SeraiAddress, merkle::IncrementalUnbalancedMerkleTree, - constants::GENESIS_LIQUIDITY_PERIOD, }, validator_sets::Event, }, + Serai, }; use serai_db::*; @@ -86,14 +85,13 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - #[cfg(not(coverage))] - log::debug!( + serai_log::debug!( "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" ); if latest_serai_block_number < start_scan_block_number { // made_progress = False - // Return, nothing new to progress with + // Skip block already indexed return Ok(false); } @@ -118,8 +116,7 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; - #[cfg(not(coverage))] - log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); + serai_log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); let mut txn = self.db.txn(); let mut builds_upon = @@ -130,8 +127,8 @@ impl ContinuallyRan for CosignIntendTask { builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_BRANCH_TAG) { // Ephemeral RPC Err: - // serai.block_by_number(block_number) may return a different chain history (fork) - // but the prior indexed block was already finalized, so we MUST build upon it + // serai.block_by_number(block_number) may have returned a different chain history + // from prior indexed block already finalized, // task to re-run and continue trying until on the finalized chain Err(format!( "node's block #{block_number} doesn't build upon the block #{} prior indexed", @@ -159,7 +156,7 @@ impl ContinuallyRan for CosignIntendTask { }; let Ok(network) = ExternalNetworkId::try_from(*network) else { // Not an ExternalNetworkId, possible Serai network allocation - // safe to skip this allocation event + // safe to just skip this allocation event continue; }; @@ -180,8 +177,8 @@ impl ContinuallyRan for CosignIntendTask { let existing = Stakes::get(&txn, network, *validator) // critical panic: // this is a critical issue and will not be solved after re-tries, - // missing Stakes from previous blocks will remain missing until re-indexed (if encountered) - // halt the process + // missing Stakes from previous blocks will remain missing until re-indexed + // if encountered halt the process .expect("unable to deallocate with no prior existing stake"); Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); @@ -213,24 +210,29 @@ impl ContinuallyRan for CosignIntendTask { let Event::SetKeys { set, key_pair } = event else { unreachable!("event from `set_keys_events` wasn't `Event::SetKeys`") }; - has_events = HasEvents::Notable; let validators = Validators::take(&mut txn, *set) // critical panic: // this is a critical issue and will not be solved after re-tries, - // missing Validators from previous blocks will remain missing until re-indexed (if encountered) - // halt the process + // missing Validators from previous blocks will remain missing until re-indexed + // if encountered halt the process .expect("set which wasn't decided set keys"); let stake: u64 = validators .iter() .map(|v| Stakes::get(&txn, set.network, *v).unwrap_or(Amount(0)).0) .sum(); - LatestSet::set( - &mut txn, - set.network, - &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, - ); + + // Sets with 0 stake should be skipped and not considered w.r.t. cosigning + // for no set with stake then has_events will remain HasEvents::No for this block and ignored + if stake > 0 { + has_events = HasEvents::Notable; + LatestSet::set( + &mut txn, + set.network, + &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, + ); + } } // Handle burn with instruction events (makes block non-notable if not already notable) @@ -242,6 +244,8 @@ impl ContinuallyRan for CosignIntendTask { let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); + serai_log::debug!("type of has_events={has_events:?}"); + // If this is notable, it creates a new global session, which we index into the database // now if has_events == HasEvents::Notable { @@ -250,9 +254,10 @@ impl ContinuallyRan for CosignIntendTask { new_sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(), ); - let mut sets = Vec::with_capacity(new_sets_and_keys_and_stakes.len()); - let mut keys = HashMap::with_capacity(new_sets_and_keys_and_stakes.len()); - let mut stakes = HashMap::with_capacity(new_sets_and_keys_and_stakes.len()); + let length = new_sets_and_keys_and_stakes.len(); + let mut sets = Vec::with_capacity(length); + let mut keys = HashMap::with_capacity(length); + let mut stakes = HashMap::with_capacity(length); let mut total_stake = 0; for (set, key, stake) in new_sets_and_keys_and_stakes { sets.push(set); @@ -261,38 +266,12 @@ impl ContinuallyRan for CosignIntendTask { total_stake += stake.0; } - if GenesisTime::get(&txn).is_none() { - let time = serai_block.header.unix_time_in_millis(); - if time > 0 { - GenesisTime::set(&mut txn, &time); - } - } - if total_stake == 0 { - let genesis_time = GenesisTime::get(&txn) - // critical panic: - // this is a critical issue and will not be solved after re-tries, - // missing GenesisTime from previous blocks will remain missing until re-indexed (if encountered) - // halt the process - .expect("no genesis time for block #{block_number}"); - let time_elapsed_since_genesis = - serai_block.header.unix_time_in_millis().saturating_sub(genesis_time); - let genesis_period_end_timestamp = - genesis_time + u64::try_from(GENESIS_LIQUIDITY_PERIOD.as_millis()).unwrap(); - - if time_elapsed_since_genesis >= genesis_period_end_timestamp { - // critical panic: - // this is a critical issue and will not be solved after re-tries, - // missing Stakes from previous blocks will remain missing until re-indexed (if encountered) - // halt the process - panic!("cosigning sets for block #{block_number} had 0 stake in total, while stake is required"); - } - - // Genesis era not ended period: assign equal stake to each validator set - for set in &sets { - stakes.insert(set.network, 1); - } - total_stake = u64::try_from(sets.len()).unwrap(); + // critical panic: + // this is a critical issue and will not be solved after re-tries, + // missing Stakes greater than zero from previous blocks will remain missing until re-indexed + // if encountered halt the process + panic!("cosigning sets for block #{block_number} had 0 stake in total, while stake is required"); } let next_global_session_info = GlobalSession { @@ -322,19 +301,18 @@ impl ContinuallyRan for CosignIntendTask { match has_events { HasEvents::Notable | HasEvents::NonNotable => { let global_session_for_this_block = global_session_for_this_block - // critical panic: basically unreachable given the condition above this match + // panic: invariant, this is checked above .expect("global session for this block was None but still attempting to cosign it"); // The GlobalSession that is ending let ending_global_session_info = GlobalSessions::get(&txn, global_session_for_this_block) - // critical panic: something that went wrong above + // panic: invariant, this has to exist by this point .expect("last global session intended wasn't saved to the database"); // Tell each set of their expectation to cosign this block for set in ending_global_session_info.sets { - #[cfg(not(coverage))] - log::debug!("set will cosign block: set={set:?}, block_number={block_number}"); + serai_log::debug!("set will cosign block: set={set:?}, block_number={block_number}"); IntendedCosigns::send( &mut txn, @@ -351,8 +329,7 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - #[cfg(not(coverage))] - log::debug!("finished iterating: has_events={has_events:?}"); + serai_log::debug!("finished iterating: has_events={has_events:?}"); // Populate a singular feed with every block's status for the evaluator to work off of BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); From 95e6c4cb2edab129d061f5a51ffbc43b641205cf Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 23 Feb 2026 17:11:59 -0300 Subject: [PATCH 22/71] feat(cosign): remove unwanted addition --- coordinator/cosign/src/intend.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 050bc5936..ed831f99d 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -37,7 +37,6 @@ create_db!( Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount, Validators: (set: ExternalValidatorSet) -> Vec, LatestSet: (network: ExternalNetworkId) -> Set, - GenesisTime: () -> u64, } ); From e34d702abc0ace0b0bbd4ffeb888fe906901e387 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 23 Feb 2026 17:13:48 -0300 Subject: [PATCH 23/71] chore(log): wrong license --- common/log/Cargo.toml | 2 +- common/log/LICENSE | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/log/Cargo.toml b/common/log/Cargo.toml index 136b0ddc2..69a0b6107 100644 --- a/common/log/Cargo.toml +++ b/common/log/Cargo.toml @@ -2,7 +2,7 @@ name = "serai-log" version = "0.1.0" description = "Coverage-gated logging macros for the Serai project" -license = "AGPL-3.0-only" +license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/log" authors = ["Luke Parker ", "rafael_xmr "] keywords = [] diff --git a/common/log/LICENSE b/common/log/LICENSE index 5b505480a..f995f1e78 100644 --- a/common/log/LICENSE +++ b/common/log/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024-2026 Serai +Copyright (c) 2026 Serai Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From e488dc2830fb53ea85180fb4cac2e9cc411d3580 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 24 Feb 2026 17:18:26 -0300 Subject: [PATCH 24/71] feat(cosign): misc log & comments --- coordinator/cosign/Cargo.toml | 2 +- coordinator/cosign/src/delay.rs | 7 ++-- coordinator/cosign/src/evaluator.rs | 57 +++++++++-------------------- coordinator/cosign/src/intend.rs | 9 ++++- 4 files changed, 29 insertions(+), 46 deletions(-) diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 1e51861d0..6f7363e8e 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -23,7 +23,7 @@ blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-client-serai = { path = "../../substrate/client/serai", default-features = false } -log = { version = "0.4", default-features = false, features = ["std"] } +serai-log = { path = "../../common/log", version = "0.1.0" } tokio = { version = "1", default-features = false } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 6964fa5a7..66b6a898a 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -4,7 +4,7 @@ use std::time::{Duration, SystemTime}; use serai_db::*; use serai_task::{DoesNotError, ContinuallyRan}; -use crate::{evaluator::CosignedBlocks, latest_cosigned_block_number}; +use crate::evaluator::CosignedBlocks; #[cfg(not(any(test, feature = "dev")))] /// How often callers should broadcast the cosigns flagged for rebroadcasting. @@ -52,10 +52,9 @@ impl ContinuallyRan for CosignDelayTask { break; }; - let latest_cosigned_block_number = LatestCosignedBlockNumber::get(getter).unwrap_or(0); + let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&mut txn).unwrap_or(0); - #[cfg(not(coverage))] - log::debug!( + serai_log::debug!( "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_cosigned_block_number={latest_cosigned_block_number}", ); diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 99320aef0..95d563159 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -44,7 +44,7 @@ db_channel!( fn currently_evaluated_global_session_strict( txn: &mut impl DbTxn, block_number: u64, -) -> Result<([u8; 32], GlobalSession), String> { +) -> ([u8; 32], GlobalSession) { let mut res = { let existing = match CurrentlyEvaluatedGlobalSession::get(txn) { Some(existing) => existing, @@ -76,7 +76,7 @@ fn currently_evaluated_global_session_strict( } } - Ok(res) + res } pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> { @@ -145,9 +145,9 @@ fn commit_cosigned_block( CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); txn.commit(); - #[cfg(not(coverage))] + // Roughly ~1 hour, no need for repetitive logging if (block_number % 500) == 0 { - log::debug!("marking {label} #{block_number} as cosigned"); + serai_log::debug!("marking {label} #{block_number} as cosigned"); } Ok(()) @@ -177,15 +177,6 @@ async fn ensure_cosigned( Err(format!("{label} block (#{block_number}) wasn't yet cosigned. this should resolve shortly")) } -fn latest_cosign_block_number( - getter: &impl Get, - global_session: [u8; 32], - network: ExternalNetworkId, -) -> Option { - NetworksLatestCosignedBlock::get(getter, global_session, network) - .map(|signed_cosign| signed_cosign.cosign.block_number) -} - /// A task to determine if a block has been cosigned and we should handle it. pub(crate) struct CosignEvaluatorTask { pub(crate) db: D, @@ -202,44 +193,30 @@ impl ContinuallyRan for CosignEvaluatorT let mut made_progress = false; loop { + /// This task requires the global sessions channel to be populated + // as the block declaring the session is indexed + if CurrentlyEvaluatedGlobalSession::get(&self.db).is_none() && + GlobalSessionsChannel::peek(&self.db).is_none() + { + // no session has ever been declared + return Ok(false); + } + let mut txn = self.db.txn(); let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) else { break; }; - #[cfg(not(coverage))] - log::debug!( + serai_log::log::debug!( "beginning evaluator: block_number={block_number}, has_events={:#?}", has_events ); - // Blocks before the first global session don't need cosigning. The intend task - // commits GlobalSessionsChannel and BlockEvents atomically per-block, so if no - // session exists in the channel, every block in BlockEvents is guaranteed to have - // has_events == HasEvents::No. Pass them through directly so the delay task can - // advance LatestCosignedBlockNumber. - // if CurrentlyEvaluatedGlobalSession::get(&txn).is_none() { - // let skip = match GlobalSessionsChannel::peek(&txn) { - // Some((_, ref session)) => block_number < session.start_block_number, - // // No session declared yet — all queued blocks are pre-session - // None => true, - // }; - // if skip { - // debug_assert!( - // has_events == HasEvents::No, - // "pre-session block #{block_number} had events requiring cosigning" - // ); - // commit_cosigned_block(txn, block_number, "pre-session block")?; - // made_progress = true; - // continue; - // } - // } - // Fetch the global session information. This must be called for ALL post-session blocks // (including HasEvents::No) to maintain incrementality for session transitions. let (global_session, global_session_info) = - currently_evaluated_global_session_strict(&mut txn, block_number)?; + currently_evaluated_global_session_strict(&mut txn, block_number); match has_events { // Because this had notable events, we require an explicit cosign for this block by a @@ -249,7 +226,9 @@ impl ContinuallyRan for CosignEvaluatorT for set in global_session_info.sets { // Check if we have the cosign from this set - if latest_cosign_block_number(&txn, global_session, set.network) == Some(block_number) + if NetworksLatestCosignedBlock::get(&mut txn, global_session, set.network) + .map(|signed_cosign| signed_cosign.cosign.block_number) == + Some(block_number) { // Since have this cosign, add the set's weight to the weight which has cosigned weight_cosigned += diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index ed831f99d..98cc5db79 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -115,8 +115,6 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; - serai_log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); - let mut txn = self.db.txn(); let mut builds_upon = BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new()); @@ -145,6 +143,8 @@ impl ContinuallyRan for CosignIntendTask { ); BuildsUpon::set(&mut txn, &builds_upon); + serai_log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); + let mut has_events = HasEvents::No; let vset_events = serai_block_events.validator_sets(); @@ -231,6 +231,11 @@ impl ContinuallyRan for CosignIntendTask { set.network, &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, ); + } else { + serai_log::debug!( + "skipped session {:#?} with 0 stake from being selected for cosigns", + set.session + ); } } From c2ec70734b06af89277ba609353895b09f63973e Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 25 Feb 2026 15:55:40 -0300 Subject: [PATCH 25/71] feat: add test libs & end clean up cosign tests --- common/log/Cargo.toml | 2 +- common/log/build.rs | 3 + coordinator/cosign/Cargo.toml | 9 +- coordinator/cosign/src/delay.rs | 8 +- coordinator/cosign/src/evaluator.rs | 35 +- coordinator/cosign/src/intend.rs | 4 +- coordinator/cosign/src/lib.rs | 4 +- coordinator/cosign/src/tests/cosigning.rs | 1502 ++++++++++--------- coordinator/cosign/src/tests/delay.rs | 25 +- coordinator/cosign/src/tests/evaluator.rs | 915 ++++++----- coordinator/cosign/src/tests/intend.rs | 646 +++++--- coordinator/cosign/src/tests/mod.rs | 94 +- coordinator/cosign/types/Cargo.toml | 1 - coordinator/cosign/types/src/tests/mod.rs | 19 +- coordinator/src/main.rs | 12 +- substrate/abi/src/modules/validator_sets.rs | 2 + substrate/client/serai/src/lib.rs | 4 +- substrate/primitives/src/lib.rs | 2 +- tests/shim-rpc/Cargo.toml | 32 + tests/shim-rpc/LICENSE | 21 + tests/shim-rpc/README.md | 27 + tests/shim-rpc/src/builder.rs | 46 + tests/shim-rpc/src/lib.rs | 146 ++ tests/shim-rpc/src/rpc.rs | 265 ++++ tests/shim-rpc/src/state.rs | 187 +++ tests/shim-rpc/tests/integration.rs | 275 ++++ tests/task/Cargo.toml | 21 + tests/task/LICENSE | 21 + tests/task/src/lib.rs | 35 + 29 files changed, 2795 insertions(+), 1568 deletions(-) create mode 100644 common/log/build.rs create mode 100644 tests/shim-rpc/Cargo.toml create mode 100644 tests/shim-rpc/LICENSE create mode 100644 tests/shim-rpc/README.md create mode 100644 tests/shim-rpc/src/builder.rs create mode 100644 tests/shim-rpc/src/lib.rs create mode 100644 tests/shim-rpc/src/rpc.rs create mode 100644 tests/shim-rpc/src/state.rs create mode 100644 tests/shim-rpc/tests/integration.rs create mode 100644 tests/task/Cargo.toml create mode 100644 tests/task/LICENSE create mode 100644 tests/task/src/lib.rs diff --git a/common/log/Cargo.toml b/common/log/Cargo.toml index 69a0b6107..6d65fcf51 100644 --- a/common/log/Cargo.toml +++ b/common/log/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Luke Parker ", "rafael_xmr Result<(), String> { - CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); - txn.commit(); - - // Roughly ~1 hour, no need for repetitive logging - if (block_number % 500) == 0 { - serai_log::debug!("marking {label} #{block_number} as cosigned"); - } - - Ok(()) -} - /// If the cosign threshold isn't met, request cosigns and return an error. async fn ensure_cosigned( weight_cosigned: u64, @@ -193,7 +176,7 @@ impl ContinuallyRan for CosignEvaluatorT let mut made_progress = false; loop { - /// This task requires the global sessions channel to be populated + // This task requires the global sessions channel to be populated // as the block declaring the session is indexed if CurrentlyEvaluatedGlobalSession::get(&self.db).is_none() && GlobalSessionsChannel::peek(&self.db).is_none() @@ -213,8 +196,7 @@ impl ContinuallyRan for CosignEvaluatorT has_events ); - // Fetch the global session information. This must be called for ALL post-session blocks - // (including HasEvents::No) to maintain incrementality for session transitions. + // Fetch the global session information let (global_session, global_session_info) = currently_evaluated_global_session_strict(&mut txn, block_number); @@ -296,7 +278,14 @@ impl ContinuallyRan for CosignEvaluatorT } // Since we checked we had the necessary cosigns, send it for delay before acknowledgement - commit_cosigned_block(txn, block_number, "block")?; + CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); + txn.commit(); + + // Roughly ~1 hour, no need for repetitive logging + if (block_number % 500) == 0 { + serai_log::debug!("marking block #{block_number} as cosigned"); + } + made_progress = true; } diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 98cc5db79..9b80e4fc0 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -110,7 +110,7 @@ impl ContinuallyRan for CosignIntendTask { let serai_block_hash = serai_block.header.hash(); let serai_block_events = self .serai - .events(&serai_block_hash) + .events(serai_block_hash) .await // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching events for block #{block_number}: {e}"))?; @@ -233,7 +233,7 @@ impl ContinuallyRan for CosignIntendTask { ); } else { serai_log::debug!( - "skipped session {:#?} with 0 stake from being selected for cosigns", + "skipped session {:?} with 0 stake from being selected for cosigns", set.session ); } diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 0de70ab54..ebafe42c1 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -38,10 +38,10 @@ use delay::LatestCosignedBlockNumber; /// Test helpers and fixtures. pub mod tests; -#[cfg(not(any(test, feature = "dev")))] +#[cfg(not(any(test)))] /// The interval at which the cosigning loop runs. pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); -#[cfg(any(test, feature = "dev"))] +#[cfg(any(test))] /// The interval at which the cosigning loop runs. pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_millis(10); diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index e4293df90..1cd374524 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, time::Duration}; use borsh::{BorshDeserialize, BorshSerialize}; @@ -6,35 +6,26 @@ use blake2::{Blake2s256, Digest}; use serai_db::{Db as _, DbTxn, MemDb}; -use serai_simulator_node::{SimulatorNode, SimulatorState}; +use serai_task::Task; -use serai_client_serai::{ - Serai, - abi::primitives::{ - BlockHash, - crypto::Public, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, - }, +use serai_client_serai::abi::primitives::{ + BlockHash, + crypto::Public, + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, }; use crate::{ Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, SubstrateBlockHash, delay::LatestCosignedBlockNumber, evaluator::CurrentlyEvaluatedGlobalSession, - intend::IntendedCosigns, tests::TestRequest, + intend::IntendedCosigns, tests::TestRequest, tests::setup_shim_serai, }; use serai_cosign_types::tests::{ fixture_public_key, public_key_from_seed, sign_cosign_with_fixture, sign_cosign_with_seed, }; -async fn setup_mock_serai() -> (SimulatorNode, Arc) { - let node = SimulatorNode::start(SimulatorState::default()).await; - let serai = Arc::new(Serai::new(node.url()).unwrap()); - (node, serai) -} - const FIXTURE_SEED: [u8; 32] = [0xff; 32]; struct Sr25519Fixture { @@ -132,12 +123,14 @@ fn global_session_id_generation() { // Both should produce the same ID (order-independent) let id1 = GlobalSession::id(cosigners1.clone()); - let id2 = GlobalSession::id(cosigners2); + let id2 = GlobalSession::id(cosigners2.clone()); assert_eq!(id1, id2, "IDs should be the same regardless of input order"); // Same input should always produce the same ID (deterministic) let id3 = GlobalSession::id(cosigners1.clone()); + let id4 = GlobalSession::id(cosigners2.clone()); assert_eq!(id1, id3, "same input should produce the same ID"); + assert_eq!(id2, id4, "same input should produce the same ID"); // Different sets should produce different IDs let set3 = ExternalValidatorSet { network: network1, session: Session(1) }; // same network as set1, different session @@ -148,852 +141,893 @@ fn global_session_id_generation() { ); } -#[test] -fn temporal_returns_true_for_temporal_errors() { - assert!(IntakeCosignError::NotYetIndexedBlock.temporal()); - assert!(IntakeCosignError::StaleCosign.temporal()); - assert!(IntakeCosignError::UnrecognizedGlobalSession.temporal()); - assert!(IntakeCosignError::FutureGlobalSession.temporal()); -} - -#[test] -fn temporal_returns_false_for_non_temporal_errors() { - assert!(!IntakeCosignError::BeforeGlobalSessionStart.temporal()); - assert!(!IntakeCosignError::AfterGlobalSessionEnd.temporal()); - assert!(!IntakeCosignError::NonParticipatingNetwork.temporal()); - assert!(!IntakeCosignError::InvalidSignature.temporal()); -} +mod intake_cosign_error { + use super::*; -#[tokio::test] -async fn spawn_creates_cosigning_instance() { - let db = MemDb::new(); - let (_node, serai) = setup_mock_serai().await; - let (request, _calls) = TestRequest::new(false); - let cosigning = Cosigning::spawn(db, serai, request, vec![]); + #[test] + fn temporal_returns_true_for_temporal_errors() { + assert!(IntakeCosignError::NotYetIndexedBlock.temporal()); + assert!(IntakeCosignError::StaleCosign.temporal()); + assert!(IntakeCosignError::UnrecognizedGlobalSession.temporal()); + assert!(IntakeCosignError::FutureGlobalSession.temporal()); + } - assert!(cosigning.cosigns_to_rebroadcast().is_empty()); + #[test] + fn temporal_returns_false_for_non_temporal_errors() { + assert!(!IntakeCosignError::BeforeGlobalSessionStart.temporal()); + assert!(!IntakeCosignError::AfterGlobalSessionEnd.temporal()); + assert!(!IntakeCosignError::NonParticipatingNetwork.temporal()); + assert!(!IntakeCosignError::InvalidSignature.temporal()); + } } -#[tokio::test] -async fn spawn_with_tasks_to_run_upon_cosigning() { - use serai_task::Task; +mod spawn { + use super::*; - let db = MemDb::new(); - let (_node, serai) = setup_mock_serai().await; - let (request, _calls) = TestRequest::new(false); + #[tokio::test] + async fn spawn_creates_cosigning_instance() { + let db = MemDb::new(); + let (_shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); + let cosigning = Cosigning::spawn(db, serai, request, vec![]); - let (_task, task_handle) = Task::new(); - let tasks_to_run = vec![task_handle]; + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); + } - let cosigning = Cosigning::spawn(db.clone(), serai, request, tasks_to_run); + #[tokio::test] + async fn spawn_with_tasks_to_run_upon_cosigning() { + let db = MemDb::new(); + let (_shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); - assert!(cosigning.cosigns_to_rebroadcast().is_empty()); -} + let (_task, task_handle) = Task::new(); + let tasks_to_run = vec![task_handle]; -#[tokio::test] -async fn spawn_initializes_cosigning_instance_correctly() { - let db = MemDb::new(); - let (_node, serai) = setup_mock_serai().await; - let (request, _calls) = TestRequest::new(false); + let cosigning = Cosigning::spawn(db.clone(), serai, request, tasks_to_run); - let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); - - assert!(cosigning.cosigns_to_rebroadcast().is_empty()); + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); + } - let latest = Cosigning::::latest_cosigned_block_number(&db); - assert!(latest.is_ok()); - assert_eq!(latest.unwrap(), 0); -} + #[tokio::test] + async fn spawn_initializes_cosigning_instance_correctly() { + let db = MemDb::new(); + let (_shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); -#[tokio::test] -async fn spawn_tasks_chain_correctly() { - let db = MemDb::new(); - let (_node, serai) = setup_mock_serai().await; - let (request, _calls) = TestRequest::new(false); + let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); - let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + assert!(cosigning.cosigns_to_rebroadcast().is_empty()); - tokio::time::sleep(Duration::from_millis(10)).await; + let latest = Cosigning::::latest_cosigned_block_number(&db); + assert!(latest.is_ok()); + assert_eq!(latest.unwrap(), 0); + } - let latest = Cosigning::::latest_cosigned_block_number(&db); - assert!(latest.is_ok()); -} + #[tokio::test] + async fn spawn_tasks_chain_correctly() { + let db = MemDb::new(); + let (_shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); -#[test] -fn latest_cosigned_block_number_defaults_to_zero() { - let db = MemDb::new(); - assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 0); -} + let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); -#[test] -fn latest_cosigned_block_number_errors_when_faulted() { - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &[1u8; 32]); - txn.commit(); - } - assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); -} + tokio::time::sleep(Duration::from_millis(10)).await; -#[test] -fn latest_cosigned_block_number_returns_stored_value() { - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &42u64); - txn.commit(); + let latest = Cosigning::::latest_cosigned_block_number(&db); + assert!(latest.is_ok()); } - assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 42); } -#[test] -fn cosigned_block_returns_none_beyond_latest() { - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &5u64); - txn.commit(); - } - assert_eq!(Cosigning::::cosigned_block(&db, 6).unwrap(), None); -} +mod latest_cosigned_block_number { + use super::*; -#[test] -fn cosigned_block_returns_hash_when_in_range() { - let mut db = MemDb::new(); - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &5u64); - SubstrateBlockHash::set(&mut txn, 3, &block_hash); - txn.commit(); + #[test] + fn latest_cosigned_block_number_defaults_to_zero() { + let db = MemDb::new(); + assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 0); } - assert_eq!(Cosigning::::cosigned_block(&db, 3).unwrap(), Some(block_hash)); -} -#[test] -fn cosigned_block_errors_when_faulted() { - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &[1u8; 32]); - txn.commit(); + #[test] + fn latest_cosigned_block_number_errors_when_faulted() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &[1u8; 32]); + txn.commit(); + } + assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); } - assert!(matches!(Cosigning::::cosigned_block(&db, 1), Err(Faulted))); -} - -#[tokio::test] -async fn cosigning_cosigned_block_returns_correct_hash() { - let mut db = MemDb::new(); - let block_hash_5 = BlockHash([42u8; 32]); - let block_hash_10 = BlockHash([43u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 5, &block_hash_5); - SubstrateBlockHash::set(&mut txn, 10, &block_hash_10); - LatestCosignedBlockNumber::set(&mut txn, &10u64); - txn.commit(); + #[test] + fn latest_cosigned_block_number_returns_stored_value() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &42u64); + txn.commit(); + } + assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 42); } - - let result = Cosigning::::cosigned_block(&db, 5); - assert!(result.is_ok()); - assert_eq!(result.unwrap(), Some(block_hash_5)); - - let result_10 = Cosigning::::cosigned_block(&db, 10); - assert!(result_10.is_ok()); - assert_eq!(result_10.unwrap(), Some(block_hash_10)); - - let result_11 = Cosigning::::cosigned_block(&db, 11); - assert!(result_11.is_ok()); - assert_eq!(result_11.unwrap(), None); -} - -#[test] -fn notable_cosigns_empty_without_cosigns() { - let db = MemDb::new(); - let cosigns = Cosigning::::notable_cosigns(&db, [1u8; 32]); - assert!(cosigns.is_empty()); } -#[test] -fn notable_cosigns_returns_cosigns_for_session() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); +mod cosigned_block { + use super::*; - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + #[test] + fn cosigned_block_returns_none_beyond_latest() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &5u64); + txn.commit(); + } + assert_eq!(Cosigning::::cosigned_block(&db, 6).unwrap(), None); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&signed).unwrap(); - - let notable = Cosigning::::notable_cosigns(&db, id); - assert_eq!(notable.len(), 1); - assert_eq!(notable[0].cosign.block_number, block_number); - assert_eq!(notable[0].cosign.block_hash, block_hash); - assert_eq!(notable[0].cosign.cosigner, ExternalNetworkId::Bitcoin); -} - -#[test] -fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); - - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &our_hash); - txn.commit(); + #[test] + fn cosigned_block_returns_hash_when_in_range() { + let mut db = MemDb::new(); + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &5u64); + SubstrateBlockHash::set(&mut txn, 3, &block_hash); + txn.commit(); + } + assert_eq!(Cosigning::::cosigned_block(&db, 3).unwrap(), Some(block_hash)); } - let faulty_cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let faulty_signed = sign_cosign(faulty_cosign, &keypair); - - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&faulty_signed).unwrap(); - - let different_session_id = [99u8; 32]; - let different_cosign = Cosign { - global_session: different_session_id, - block_number, - block_hash: our_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let different_signed = sign_cosign(different_cosign, &keypair); - { - let mut txn = db.txn(); - NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &different_signed); - txn.commit(); + #[test] + fn cosigned_block_errors_when_faulted() { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &[1u8; 32]); + txn.commit(); + } + assert!(matches!(Cosigning::::cosigned_block(&db, 1), Err(Faulted))); } - let cosigning = Cosigning::new(db); - let rebroadcast = cosigning.cosigns_to_rebroadcast(); + #[tokio::test] + async fn cosigning_cosigned_block_returns_correct_hash() { + let mut db = MemDb::new(); + let block_hash_5 = BlockHash([42u8; 32]); + let block_hash_10 = BlockHash([43u8; 32]); + + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 5, &block_hash_5); + SubstrateBlockHash::set(&mut txn, 10, &block_hash_10); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + txn.commit(); + } - assert_eq!( - rebroadcast.len(), - 1, - "should only include faults, not cosigns from different sessions" - ); - assert_eq!(rebroadcast[0].cosign.block_hash, faulty_hash); - assert_eq!(rebroadcast[0].cosign.global_session, id); -} + let result = Cosigning::::cosigned_block(&db, 5); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some(block_hash_5)); -#[test] -fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let result_10 = Cosigning::::cosigned_block(&db, 10); + assert!(result_10.is_ok()); + assert_eq!(result_10.unwrap(), Some(block_hash_10)); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let result_11 = Cosigning::::cosigned_block(&db, 11); + assert!(result_11.is_ok()); + assert_eq!(result_11.unwrap(), None); } - - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&signed).unwrap(); - - let rebroadcast = cosigning.cosigns_to_rebroadcast(); - assert_eq!(rebroadcast.len(), 1); - assert_eq!(rebroadcast[0].cosign.block_number, block_number); - assert_eq!(rebroadcast[0].cosign.block_hash, block_hash); } -#[test] -fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); +mod notable_cosigns { + use super::*; - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &our_hash); - txn.commit(); + #[test] + fn notable_cosigns_empty_without_cosigns() { + let db = MemDb::new(); + let cosigns = Cosigning::::notable_cosigns(&db, [1u8; 32]); + assert!(cosigns.is_empty()); } - let faulty_cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let faulty_signed = sign_cosign(faulty_cosign, &keypair); - - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&faulty_signed).unwrap(); - - let honest_cosign = Cosign { - global_session: id, - block_number, - block_hash: our_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let honest_signed = sign_cosign(honest_cosign, &keypair); - { - let mut txn = db.txn(); - NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &honest_signed); - txn.commit(); - } - - let cosigning = Cosigning::new(db); - let rebroadcast = cosigning.cosigns_to_rebroadcast(); - - assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == faulty_hash)); - assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == our_hash)); -} - -#[test] -fn intake_cosign_rejects_not_yet_indexed_block() { - let db = MemDb::new(); - let keypair = sr25519_fixture(); - - let cosign = Cosign { - global_session: [1u8; 32], - block_number: 1, - block_hash: BlockHash([9u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::NotYetIndexedBlock))); -} + #[test] + fn notable_cosigns_returns_cosigns_for_session() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } -#[test] -fn intake_cosign_accepts_valid_cosign() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&signed).unwrap(); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let notable = Cosigning::::notable_cosigns(&db, id); + assert_eq!(notable.len(), 1); + assert_eq!(notable[0].cosign.block_number, block_number); + assert_eq!(notable[0].cosign.block_hash, block_hash); + assert_eq!(notable[0].cosign.cosigner, ExternalNetworkId::Bitcoin); } - - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(cosigning.intake_cosign(&signed).is_ok()); } -#[test] -fn intake_cosign_rejects_stale_cosign() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); +mod cosigns_to_rebroadcast { + use super::*; + + #[test] + fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &block_hash); - SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); - txn.commit(); - } + let faulty_cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&faulty_signed).unwrap(); + + let different_session_id = [99u8; 32]; + let different_cosign = Cosign { + global_session: different_session_id, + block_number, + block_hash: our_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let different_signed = sign_cosign(different_cosign, &keypair); + { + let mut txn = db.txn(); + NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &different_signed); + txn.commit(); + } - let first_cosign = Cosign { - global_session: id, - block_number: 2, - block_hash: BlockHash([2u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let first_signed = sign_cosign(first_cosign, &keypair); + let cosigning = Cosigning::new(db); + let rebroadcast = cosigning.cosigns_to_rebroadcast(); - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&first_signed).unwrap(); + assert_eq!( + rebroadcast.len(), + 1, + "should only include faults, not cosigns from different sessions" + ); + assert_eq!(rebroadcast[0].cosign.block_hash, faulty_hash); + assert_eq!(rebroadcast[0].cosign.global_session, id); + } - let stale_cosign = Cosign { - global_session: id, - block_number: 1, - block_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let stale_signed = sign_cosign(stale_cosign, &keypair); + #[test] + fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - assert!(matches!(cosigning.intake_cosign(&stale_signed), Err(IntakeCosignError::StaleCosign))); -} + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); -#[test] -fn intake_cosign_rejects_unrecognized_global_session() { - let keypair = sr25519_fixture(); + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&signed).unwrap(); - let mut db = MemDb::new(); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let rebroadcast = cosigning.cosigns_to_rebroadcast(); + assert_eq!(rebroadcast.len(), 1); + assert_eq!(rebroadcast[0].cosign.block_number, block_number); + assert_eq!(rebroadcast[0].cosign.block_hash, block_hash); } - let cosign = Cosign { - global_session: [99u8; 32], - block_number, - block_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::UnrecognizedGlobalSession) - )); -} + #[test] + fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } -#[test] -fn intake_cosign_rejects_before_global_session_start() { - let mut session = session_fixture(); - session.start_block_number = 10; - let id = session.id(); - let keypair = sr25519_fixture(); + let faulty_cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&faulty_signed).unwrap(); + + let honest_cosign = Cosign { + global_session: id, + block_number, + block_hash: our_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let honest_signed = sign_cosign(honest_cosign, &keypair); + { + let mut txn = db.txn(); + NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &honest_signed); + txn.commit(); + } - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestCosignedBlockNumber::set(&mut txn, &10u64); + let cosigning = Cosigning::new(db); + let rebroadcast = cosigning.cosigns_to_rebroadcast(); - SubstrateBlockHash::set(&mut txn, 5, &BlockHash([5u8; 32])); - txn.commit(); + assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == faulty_hash)); + assert!(rebroadcast.iter().any(|c| c.cosign.block_hash == our_hash)); } - - let cosign = Cosign { - global_session: id, - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::BeforeGlobalSessionStart) - )); } -#[test] -fn intake_cosign_rejects_after_global_session_end() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); +mod intake_cosign { + use super::*; - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + #[test] + fn intake_cosign_rejects_not_yet_indexed_block() { + let db = MemDb::new(); + let keypair = sr25519_fixture(); - { - let mut txn = db.txn(); + let cosign = Cosign { + global_session: [1u8; 32], + block_number: 1, + block_hash: BlockHash([9u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); - GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - - SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); - txn.commit(); + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::NotYetIndexedBlock))); } - let cosign = Cosign { - global_session: id, - block_number: 10, - block_hash: BlockHash([10u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::AfterGlobalSessionEnd) - )); -} - -#[test] -fn intake_cosign_rejects_invalid_signature() { - let session = session_fixture(); - let id = session.id(); - // Use a different keypair than the one in session_fixture - let wrong_keypair = Sr25519Fixture { seed: [99u8; 32] }; + #[test] + fn intake_cosign_accepts_valid_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &keypair); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let mut cosigning = Cosigning::new(db); + assert!(cosigning.intake_cosign(&signed).is_ok()); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &wrong_keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::InvalidSignature))); -} - -#[test] -fn intake_cosign_rejects_future_global_session() { - let mut session = session_fixture(); - session.start_block_number = 10; - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + #[test] + fn intake_cosign_rejects_stale_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 1, &block_hash); + SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); + txn.commit(); + } - LatestCosignedBlockNumber::set(&mut txn, &5u64); - SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); - txn.commit(); + let first_cosign = Cosign { + global_session: id, + block_number: 2, + block_hash: BlockHash([2u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let first_signed = sign_cosign(first_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&first_signed).unwrap(); + + let stale_cosign = Cosign { + global_session: id, + block_number: 1, + block_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let stale_signed = sign_cosign(stale_cosign, &keypair); + + assert!(matches!(cosigning.intake_cosign(&stale_signed), Err(IntakeCosignError::StaleCosign))); } - let cosign = Cosign { - global_session: id, - block_number: 10, - block_hash: BlockHash([10u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::FutureGlobalSession))); -} - -#[test] -fn intake_cosign_handles_faulty_cosign() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + #[test] + fn intake_cosign_rejects_unrecognized_global_session() { + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &our_hash); - txn.commit(); + let cosign = Cosign { + global_session: [99u8; 32], + block_number, + block_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::UnrecognizedGlobalSession) + )); } - let cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); + #[test] + fn intake_cosign_rejects_before_global_session_start() { + let mut session = session_fixture(); + session.start_block_number = 10; + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + + SubstrateBlockHash::set(&mut txn, 5, &BlockHash([5u8; 32])); + txn.commit(); + } - let mut cosigning = Cosigning::new(db.clone()); + let cosign = Cosign { + global_session: id, + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::BeforeGlobalSessionStart) + )); + } - assert!(cosigning.intake_cosign(&signed).is_ok()); + #[test] + fn intake_cosign_rejects_after_global_session_end() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); - let faults: Option> = Faults::get(&db, id); - assert!(faults.is_some()); - assert_eq!(faults.as_ref().unwrap().len(), 1); - assert_eq!(faults.unwrap()[0].cosign.block_hash, faulty_hash); + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); - let faulted: Option<[u8; 32]> = FaultedSession::get(&db); - assert_eq!(faulted, Some(id)); -} + { + let mut txn = db.txn(); -#[test] -fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); + GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); + txn.commit(); + } - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &BlockHash([1u8; 32])); - SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); - txn.commit(); + let cosign = Cosign { + global_session: id, + block_number: 10, + block_hash: BlockHash([10u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::AfterGlobalSessionEnd) + )); } - let first_cosign = Cosign { - global_session: id, - block_number: 1, - block_hash: BlockHash([1u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let first_signed = sign_cosign(first_cosign, &keypair); - - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&first_signed).unwrap(); - - let newer_cosign = Cosign { - global_session: id, - block_number: 2, - block_hash: BlockHash([2u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let newer_signed = sign_cosign(newer_cosign, &keypair); - - assert!(cosigning.intake_cosign(&newer_signed).is_ok()); - - let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); - assert_eq!(latest.cosign.block_number, 2); -} + #[test] + fn intake_cosign_rejects_invalid_signature() { + let session = session_fixture(); + let id = session.id(); + // Use a different keypair than the one in session_fixture + let wrong_keypair = Sr25519Fixture { seed: [99u8; 32] }; + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } -#[test] -fn intake_cosign_accepts_cosign_at_global_session_last_block() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let signed = sign_cosign(cosign, &wrong_keypair); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::InvalidSignature))); + } - { - let mut txn = db.txn(); - GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - for i in 1 ..= 5 { - SubstrateBlockHash::set(&mut txn, i, &BlockHash([i as u8; 32])); + #[test] + fn intake_cosign_rejects_future_global_session() { + let mut session = session_fixture(); + session.start_block_number = 10; + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + + LatestCosignedBlockNumber::set(&mut txn, &5u64); + SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); + txn.commit(); } - txn.commit(); - } - let mut cosigning = Cosigning::new(db.clone()); + let cosign = Cosign { + global_session: id, + block_number: 10, + block_hash: BlockHash([10u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::FutureGlobalSession) + )); + } - let cosign = Cosign { - global_session: id, - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); + #[test] + fn intake_cosign_handles_faulty_cosign() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } - assert!(cosigning.intake_cosign(&signed).is_ok()); + let cosign = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); - let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); - assert_eq!(latest.cosign.block_number, 5); -} + let mut cosigning = Cosigning::new(db.clone()); -#[test] -fn intake_cosign_ignores_duplicate_fault_from_same_network() { - let session = session_fixture(); - let id = session.id(); - let keypair = sr25519_fixture(); + assert!(cosigning.intake_cosign(&signed).is_ok()); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let faults: Option> = Faults::get(&db, id); + assert!(faults.is_some()); + assert_eq!(faults.as_ref().unwrap().len(), 1); + assert_eq!(faults.unwrap()[0].cosign.block_hash, faulty_hash); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash_1 = BlockHash([2u8; 32]); - let faulty_hash_2 = BlockHash([3u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &our_hash); - txn.commit(); + let faulted: Option<[u8; 32]> = FaultedSession::get(&db); + assert_eq!(faulted, Some(id)); } - let faulty_cosign_1 = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash_1, - cosigner: ExternalNetworkId::Bitcoin, - }; - let faulty_signed_1 = sign_cosign(faulty_cosign_1, &keypair); + #[test] + fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); - let mut cosigning = Cosigning::new(db.clone()); - assert!(cosigning.intake_cosign(&faulty_signed_1).is_ok()); + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); - let faults_after_first = Faults::get(&db, id).unwrap(); - assert_eq!(faults_after_first.len(), 1); - assert_eq!(faults_after_first[0].cosign.block_hash, faulty_hash_1); - - let faulty_cosign_2 = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash_2, - cosigner: ExternalNetworkId::Bitcoin, - }; - let faulty_signed_2 = sign_cosign(faulty_cosign_2, &keypair); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 1, &BlockHash([1u8; 32])); + SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); + txn.commit(); + } - assert!(cosigning.intake_cosign(&faulty_signed_2).is_ok()); + let first_cosign = Cosign { + global_session: id, + block_number: 1, + block_hash: BlockHash([1u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let first_signed = sign_cosign(first_cosign, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&first_signed).unwrap(); + + let newer_cosign = Cosign { + global_session: id, + block_number: 2, + block_hash: BlockHash([2u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let newer_signed = sign_cosign(newer_cosign, &keypair); + + assert!(cosigning.intake_cosign(&newer_signed).is_ok()); + + let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); + assert_eq!(latest.cosign.block_number, 2); + } - let faults_after_second = Faults::get(&db, id).unwrap(); - assert_eq!(faults_after_second.len(), 1, "duplicate fault from same network should not be added"); - assert_eq!(faults_after_second[0].cosign.block_hash, faulty_hash_1); -} + #[test] + fn intake_cosign_accepts_cosign_at_global_session_last_block() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + { + let mut txn = db.txn(); + GlobalSessionsLastBlock::set(&mut txn, id, &5u64); + for i in 1 ..= 5 { + SubstrateBlockHash::set(&mut txn, i, &BlockHash([i as u8; 32])); + } + txn.commit(); + } -#[test] -fn intake_cosign_rejects_non_participating_network() { - let session = session_fixture(); - let id = session.id(); + let mut cosigning = Cosigning::new(db.clone()); - let eth_keypair = Sr25519Fixture { seed: [77u8; 32] }; + let cosign = Cosign { + global_session: id, + block_number: 5, + block_hash: BlockHash([5u8; 32]), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + assert!(cosigning.intake_cosign(&signed).is_ok()); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); + assert_eq!(latest.cosign.block_number, 5); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Ethereum }; - let signed = sign_cosign(cosign, ð_keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::NonParticipatingNetwork) - )); -} - -#[test] -fn intake_cosign_records_fault_below_threshold() { - let network1 = ExternalNetworkId::Bitcoin; - let network2 = ExternalNetworkId::Ethereum; - let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; - let set2 = ExternalValidatorSet { network: network2, session: Session(0) }; - - let keypair1 = sr25519_fixture(); - let keypair2 = Sr25519Fixture { seed: [88u8; 32] }; + #[test] + fn intake_cosign_ignores_duplicate_fault_from_same_network() { + let session = session_fixture(); + let id = session.id(); + let keypair = sr25519_fixture(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash_1 = BlockHash([2u8; 32]); + let faulty_hash_2 = BlockHash([3u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } - let mut keys = HashMap::new(); - let mut stakes = HashMap::new(); + let faulty_cosign_1 = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash_1, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed_1 = sign_cosign(faulty_cosign_1, &keypair); + + let mut cosigning = Cosigning::new(db.clone()); + assert!(cosigning.intake_cosign(&faulty_signed_1).is_ok()); + + let faults_after_first = Faults::get(&db, id).unwrap(); + assert_eq!(faults_after_first.len(), 1); + assert_eq!(faults_after_first[0].cosign.block_hash, faulty_hash_1); + + let faulty_cosign_2 = Cosign { + global_session: id, + block_number, + block_hash: faulty_hash_2, + cosigner: ExternalNetworkId::Bitcoin, + }; + let faulty_signed_2 = sign_cosign(faulty_cosign_2, &keypair); + + assert!(cosigning.intake_cosign(&faulty_signed_2).is_ok()); + + let faults_after_second = Faults::get(&db, id).unwrap(); + assert_eq!( + faults_after_second.len(), + 1, + "duplicate fault from same network should not be added" + ); + assert_eq!(faults_after_second[0].cosign.block_hash, faulty_hash_1); + } - keys.insert(network1, Public(keypair1.public_bytes())); - keys.insert(network2, Public(keypair2.public_bytes())); + #[test] + fn intake_cosign_rejects_non_participating_network() { + let session = session_fixture(); + let id = session.id(); - stakes.insert(network1, 10); - stakes.insert(network2, 90); + let eth_keypair = Sr25519Fixture { seed: [77u8; 32] }; - let session = TestGlobalSession { - start_block_number: 1, - sets: vec![set1, set2], - keys, - stakes, - total_stake: 100, - }; - let id = session.id(); + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let block_number = 1; + let block_hash = BlockHash([9u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &our_hash); - txn.commit(); + let cosign = Cosign { + global_session: id, + block_number, + block_hash, + cosigner: ExternalNetworkId::Ethereum, + }; + let signed = sign_cosign(cosign, ð_keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::NonParticipatingNetwork) + )); } - let faulty_cosign = - Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network1 }; - let faulty_signed = sign_cosign(faulty_cosign, &keypair1); + #[test] + fn intake_cosign_records_fault_below_threshold() { + let network1 = ExternalNetworkId::Bitcoin; + let network2 = ExternalNetworkId::Ethereum; + let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; + let set2 = ExternalValidatorSet { network: network2, session: Session(0) }; + + let keypair1 = sr25519_fixture(); + let keypair2 = Sr25519Fixture { seed: [88u8; 32] }; + + let mut keys = HashMap::new(); + let mut stakes = HashMap::new(); + + keys.insert(network1, Public(keypair1.public_bytes())); + keys.insert(network2, Public(keypair2.public_bytes())); + + stakes.insert(network1, 10); + stakes.insert(network2, 90); + + let session = TestGlobalSession { + start_block_number: 1, + sets: vec![set1, set2], + keys, + stakes, + total_stake: 100, + }; + let id = session.id(); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = 1; + let our_hash = BlockHash([1u8; 32]); + let faulty_hash = BlockHash([2u8; 32]); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &our_hash); + txn.commit(); + } - let mut cosigning = Cosigning::new(db.clone()); - assert!(cosigning.intake_cosign(&faulty_signed).is_ok()); + let faulty_cosign = + Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network1 }; + let faulty_signed = sign_cosign(faulty_cosign, &keypair1); - let faults = Faults::get(&db, id).unwrap(); - assert_eq!(faults.len(), 1); - assert_eq!(faults[0].cosign.block_hash, faulty_hash); + let mut cosigning = Cosigning::new(db.clone()); + assert!(cosigning.intake_cosign(&faulty_signed).is_ok()); - let faulted = FaultedSession::get(&db); - assert_eq!(faulted, None, "session should not be faulted when weight is below 17% threshold"); -} + let faults = Faults::get(&db, id).unwrap(); + assert_eq!(faults.len(), 1); + assert_eq!(faults[0].cosign.block_hash, faulty_hash); -#[test] -fn intended_cosigns_empty_returns_empty() { - let mut db = MemDb::new(); - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let mut txn = db.txn(); - assert!(Cosigning::::intended_cosigns(&mut txn, set).is_empty()); - txn.commit(); + let faulted = FaultedSession::get(&db); + assert_eq!(faulted, None, "session should not be faulted when weight is below 17% threshold"); + } } -#[test] -fn intended_cosigns_receives_sent_intent() { - let mut db = MemDb::new(); - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let intent = CosignIntent { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - notable: true, - }; - - { +mod intended_cosigns { + use super::*; + + #[test] + fn intended_cosigns_empty_returns_empty() { + let mut db = MemDb::new(); + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; let mut txn = db.txn(); - IntendedCosigns::send(&mut txn, set, &intent); + assert!(Cosigning::::intended_cosigns(&mut txn, set).is_empty()); txn.commit(); } - { - let mut txn = db.txn(); - let got = Cosigning::::intended_cosigns(&mut txn, set); - txn.commit(); - assert_eq!(got.len(), 1); - assert_eq!(got[0].global_session, intent.global_session); - assert_eq!(got[0].block_number, intent.block_number); - assert_eq!(got[0].block_hash, intent.block_hash); - assert!(got[0].notable); + #[test] + fn intended_cosigns_receives_sent_intent() { + let mut db = MemDb::new(); + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let intent = CosignIntent { + global_session: [1u8; 32], + block_number: 5, + block_hash: BlockHash([5u8; 32]), + notable: true, + }; + + { + let mut txn = db.txn(); + IntendedCosigns::send(&mut txn, set, &intent); + txn.commit(); + } + + { + let mut txn = db.txn(); + let got = Cosigning::::intended_cosigns(&mut txn, set); + txn.commit(); + assert_eq!(got.len(), 1); + assert_eq!(got[0].global_session, intent.global_session); + assert_eq!(got[0].block_number, intent.block_number); + assert_eq!(got[0].block_hash, intent.block_hash); + assert!(got[0].notable); + } } } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index c7f236d80..7bdead847 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,10 +1,10 @@ -use std::time::{Duration, Instant}; +use std::time::Instant; use crate::{ LatestCosignedBlockNumber, delay::{CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, - tests::{IntoTask, TaskTest, wait_until}, + tests::{IntoTask, TaskTest}, }; use serai_db::{Db as _, DbTxn as _, MemDb}; @@ -39,15 +39,14 @@ impl DelayTest { } async fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: u64) { - wait_until!(LatestCosignedBlockNumber::get(&self.db) => Some(latest_cosigned_block_number)); - + assert_eq!(LatestCosignedBlockNumber::get(&self.db), Some(latest_cosigned_block_number)); // Assert CosignedBlocks queue items have been consumed after task run assert_eq!(CosignedBlocks::peek(&self.db), None); } } #[tokio::test] -async fn delay_task_returns_false_with_no_messages() { +async fn returns_false_with_no_messages() { let test = DelayTest::default(); let mut task = test.into_task(); @@ -58,7 +57,7 @@ async fn delay_task_returns_false_with_no_messages() { } #[tokio::test] -async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { +async fn updates_latest_cosigned_block_number_after_ack_delay() { let (mut test, start) = DelayTest::new(); { @@ -72,12 +71,12 @@ async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { txn.commit(); } - let task = test.into_task(); - let _handle = TaskTest::spawn_task_continually_running(task, vec![]); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(2).await; - log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); + serai_log::log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); @@ -90,9 +89,10 @@ async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { txn.commit(); } + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(5).await; - log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); + serai_log::log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); @@ -105,13 +105,14 @@ async fn delay_task_updates_latest_cosigned_block_number_after_ack_delay() { txn.commit(); } + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(8).await; - log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); + serai_log::log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); } #[tokio::test] -async fn delay_task_does_not_regress_and_skips_if_not_a_later_block() { +async fn does_not_regress_and_skips_if_not_a_later_block() { let mut test = DelayTest::default(); { diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index 945d84cd6..b77a6a67d 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -44,61 +44,8 @@ impl IntoTask for EvaluatorTest { } impl EvaluatorTest { - fn assert_no_currently_evaluated_global_session(&self) { - assert_eq!(CurrentlyEvaluatedGlobalSession::get(&self.db).is_none(), true); - } - - /// Asserts that cosigned blocks from start_block to end_block (inclusive) are present in order. - fn assert_cosigned_blocks_range(&mut self, start_block: u64, end_block: u64) { - let mut txn = self.db.txn(); - for expected_block in start_block ..= end_block { - let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) - .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); - assert_eq!(block_number, expected_block, "cosigned block mismatch"); - } - assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "unexpected extra cosigned block"); - txn.commit(); - } - - fn assert_no_cosigned_blocks(&self) { - assert_eq!(CosignedBlocks::peek(&self.db).is_none(), true); - } - - fn assert_no_global_sessions_channel(&self) { - assert_eq!(GlobalSessionsChannel::peek(&self.db).is_none(), true); - } - - fn assert_has_global_sessions_channel(&self) { - assert_eq!(GlobalSessionsChannel::peek(&self.db).is_some(), true); - } - - fn assert_no_block_events(&self) { - assert_eq!(BlockEvents::peek(&self.db).is_none(), true); - } - - fn assert_has_block_events(&self) { - assert_eq!(BlockEvents::peek(&self.db).is_some(), true); - } - - /// Asserts that all evaluator DB entries are cleared (return None or are empty). - /// This is useful for verifying initial state or that cleanup worked correctly. - fn assert_evaluator_db_is_clear(&self) { - self.assert_no_currently_evaluated_global_session(); - self.assert_no_cosigned_blocks(); - self.assert_no_global_sessions_channel(); - self.assert_no_block_events(); - } - - fn assert_task_iteration_completed(&mut self, start_block: u64, end_block: u64) { - self.assert_no_global_sessions_channel(); - self.assert_no_block_events(); - self.assert_cosigned_blocks_range(start_block, end_block); - } - const GLOBAL_SESSION: [u8; 32] = [1u8; 32]; - /// Initializes a global session with the hardcoded test ID and the given start block number. - /// Returns the global session ID for use in tests that need it. fn init_global_session(&mut self, start_block_number: u64) -> [u8; 32] { let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; @@ -119,71 +66,37 @@ impl EvaluatorTest { } } -#[tokio::test] -async fn evaluator_task_returns_false_with_no_block_events() { - let test = EvaluatorTest::default(); - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - test.assert_evaluator_db_is_clear(); -} - -#[tokio::test] -async fn evaluator_task_processes_blocks_with_no_events() { - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); - txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - test.assert_task_iteration_completed(0, 2); -} - -#[tokio::test] -async fn evaluator_task_errors_on_notable_events_without_cosign() { - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, - ); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); - txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; - // When iteration fails, nothing is committed - block events are consumed but CosignedBlocks is empty - test.assert_no_global_sessions_channel(); - test.assert_has_block_events(); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, - ); - txn.commit(); +/// Verify evaluator post-run DB invariants. +/// +/// After a successful task run, all input channels should be consumed and the +/// `CosignedBlocks` output channel should contain exactly the expected block range. +fn verify_db_invariants(db: &mut MemDb, expected_cosigned_range: Option<(u64, u64)>) { + // All input channels should be fully consumed + assert!(BlockEvents::peek(db).is_none(), "BlockEvents should be fully consumed"); + assert!(GlobalSessionsChannel::peek(db).is_none(), "GlobalSessionsChannel should be consumed"); + + let has_session = CurrentlyEvaluatedGlobalSession::get(db).is_some(); + + let mut txn = db.txn(); + + // Verify cosigned blocks output + match expected_cosigned_range { + Some((start, end)) => { + assert!(has_session, "CurrentlyEvaluatedGlobalSession should exist after processing blocks"); + + for expected_block in start ..= end { + let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) + .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); + assert_eq!(block_number, expected_block, "cosigned block mismatch"); + } + assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "unexpected extra cosigned block"); + } + None => { + assert!(!has_session, "no session should exist when no blocks were processed"); + assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "expected no cosigned blocks"); + } } - - let mut task: CosignEvaluatorTask = test.into_task().into(); - task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - - TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; - test.assert_no_global_sessions_channel(); - test.assert_has_block_events(); + txn.commit(); } fn signed_cosign( @@ -198,48 +111,15 @@ fn signed_cosign( } #[tokio::test] -async fn evaluator_task_errors_on_notable_events_without_stakes() { +async fn returns_false_with_no_block_events() { let mut test = EvaluatorTest::default(); - - let global_session = { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let stakes = HashMap::new(); - - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); - txn.commit(); - - EvaluatorTest::GLOBAL_SESSION - }; - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); - } - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; + verify_db_invariants(&mut test.db, None); } #[tokio::test] -async fn evaluator_task_errors_on_non_notable_events_without_cosign() { +async fn processes_blocks_with_no_events() { let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -247,88 +127,17 @@ async fn evaluator_task_errors_on_non_notable_events_without_cosign() { let mut txn = test.db.txn(); BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); txn.commit(); } let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; - // When iteration fails, nothing is committed - test.assert_no_global_sessions_channel(); - test.assert_has_block_events(); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } - - let mut task: CosignEvaluatorTask = test.into_task().into(); - task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - - TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; -} - -#[tokio::test] -async fn evaluator_task_errors_on_request_notable_cosigns_failure() { - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); - } - - let (request, calls) = TestRequest::new(true); - let mut task = CosignEvaluatorTask { - db: test.db.clone(), - request, - last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), - }; - - TaskTest::assert_task_run_and_failed_with(&mut task, "RequestError").await; - assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); -} - -#[tokio::test] -async fn evaluator_task_errors_on_request_non_notable_cosigns_failure() { - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } - - let (request, calls) = TestRequest::new(true); - let mut task = CosignEvaluatorTask { - db: test.db.clone(), - request, - last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), - }; - - TaskTest::assert_task_run_and_failed_with(&mut task, "RequestError").await; - assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((0, 2))); } #[tokio::test] -async fn evaluator_task_processes_notable_events_when_cosigned() { +async fn processes_notable_events_when_cosigned() { let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -349,13 +158,11 @@ async fn evaluator_task_processes_notable_events_when_cosigned() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - let (block_number, _time) = CosignedBlocks::peek(&test.db).expect("expected cosigned block"); - assert_eq!(block_number, 1); + verify_db_invariants(&mut test.db, Some((1, 1))); } #[tokio::test] -async fn evaluator_task_non_notable_uses_cached_known_cosign() { +async fn non_notable_uses_cached_known_cosign() { let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -384,13 +191,11 @@ async fn evaluator_task_non_notable_uses_cached_known_cosign() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - // All three blocks should be marked as cosigned - test.assert_cosigned_blocks_range(1, 3); + verify_db_invariants(&mut test.db, Some((1, 3))); } #[tokio::test] -async fn evaluator_task_non_notable_with_cosign_returns_some() { +async fn non_notable_with_cosign_returns_some() { let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -411,77 +216,11 @@ async fn evaluator_task_non_notable_with_cosign_returns_some() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - test.assert_cosigned_blocks_range(1, 1); -} - -#[tokio::test] -async fn evaluator_task_non_notable_cosign_too_low_does_not_add_weight() { - let mut test = EvaluatorTest::default(); - let global_session = test.init_global_session(0); - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 5, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "wasn't yet cosigned").await; + verify_db_invariants(&mut test.db, Some((1, 1))); } #[tokio::test] -async fn evaluator_task_errors_on_non_notable_events_without_stakes() { - let mut test = EvaluatorTest::default(); - - let global_session = { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let stakes = HashMap::new(); - - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); - txn.commit(); - - EvaluatorTest::GLOBAL_SESSION - }; - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "didn't have its stake").await; -} - -#[tokio::test] -async fn evaluator_task_non_notable_computes_lowest_common_block() { +async fn non_notable_computes_lowest_common_block() { let mut test = EvaluatorTest::default(); let global_session = { @@ -538,82 +277,11 @@ async fn evaluator_task_non_notable_computes_lowest_common_block() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - test.assert_cosigned_blocks_range(1, 3); -} - -#[tokio::test] -#[should_panic(expected = "candidate's start block number ")] -async fn evaluator_task_panics_when_session_starts_after_block() { - let mut test = EvaluatorTest::default(); - - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); - - let info = - GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); - txn.commit(); - } - - let mut task = test.into_task(); - let _ = task.run_iteration().await; -} - -#[tokio::test] -#[should_panic(expected = "currently_evaluated_global_session_strict wasn't called incrementally")] -async fn evaluator_task_panics_when_called_non_incrementally() { - let mut test = EvaluatorTest::default(); - - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &([1u8; 32], info)); - txn.commit(); - } - - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); - let info = - GlobalSession { start_block_number: 5, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &([2u8; 32], info)); - txn.commit(); - } - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 10, has_events: HasEvents::No }); - txn.commit(); - } - - let mut task = test.into_task(); - let _ = task.run_iteration().await; + verify_db_invariants(&mut test.db, Some((1, 3))); } #[tokio::test] -async fn evaluator_task_advances_global_session_at_start_block() { +async fn advances_global_session_at_start_block() { let mut test = EvaluatorTest::default(); let session1 = [1u8; 32]; @@ -656,8 +324,7 @@ async fn evaluator_task_advances_global_session_at_start_block() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - test.assert_cosigned_blocks_range(1, 3); + verify_db_invariants(&mut test.db, Some((1, 3))); let current = CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); @@ -665,123 +332,419 @@ async fn evaluator_task_advances_global_session_at_start_block() { assert_eq!(current.1.start_block_number, 3, "session 2 should start at block 3"); } -#[tokio::test] -#[should_panic(expected = "attempt to add with overflow")] -async fn evaluator_task_errors_on_weight_overflow_notable() { - let mut test = EvaluatorTest::default(); +mod errors { + use super::*; + + #[tokio::test] + async fn notable_events_without_cosign() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + txn.commit(); + } - let global_session = { - let sets = vec![ - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, - ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, - ]; + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + // On failure, global session was consumed but block events remain + assert!(GlobalSessionsChannel::peek(&test.db).is_none()); + assert!(BlockEvents::peek(&test.db).is_some()); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, + ); + txn.commit(); + } - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + let mut task: CosignEvaluatorTask = test.into_task().into(); + task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(GlobalSessionsChannel::peek(&test.db).is_none()); + assert!(BlockEvents::peek(&test.db).is_some()); + } - let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + #[tokio::test] + async fn notable_events_without_stakes() { + let mut test = EvaluatorTest::default(); - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); - txn.commit(); + let global_session = { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - EvaluatorTest::GLOBAL_SESSION - }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Ethereum, - &signed_cosign(global_session, ExternalNetworkId::Ethereum, 1), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); + let stakes = HashMap::new(); + + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; + } + + #[tokio::test] + async fn non_notable_events_without_cosign() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(GlobalSessionsChannel::peek(&test.db).is_none()); + assert!(BlockEvents::peek(&test.db).is_some()); + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task: CosignEvaluatorTask = test.into_task().into(); + task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); + + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; } - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; -} + #[tokio::test] + async fn non_notable_events_without_stakes() { + let mut test = EvaluatorTest::default(); -#[tokio::test] -#[should_panic(expected = "attempt to add with overflow")] -async fn evaluator_task_errors_on_weight_overflow_non_notable() { - let mut test = EvaluatorTest::default(); + let global_session = { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let global_session = { - let sets = vec![ - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, - ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, - ]; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + let stakes = HashMap::new(); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); - txn.commit(); + EvaluatorTest::GLOBAL_SESSION + }; - EvaluatorTest::GLOBAL_SESSION - }; + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), - ); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Ethereum, - &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; + } + + #[tokio::test] + async fn non_notable_cosign_too_low_does_not_add_weight() { + let mut test = EvaluatorTest::default(); + let global_session = test.init_global_session(0); + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 5, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; } - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "weight_cosigned overflow").await; -} + #[tokio::test] + async fn request_notable_cosigns_failure() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); -#[tokio::test] -#[should_panic(expected = "fetching latest global session yet none declared")] -async fn evaluator_task_errors_when_no_global_session_in_channel() { - let mut test = EvaluatorTest::default(); + { + let mut txn = test.db.txn(); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - txn.commit(); + let (request, calls) = TestRequest::new(true); + let mut task = CosignEvaluatorTask { + db: test.db.clone(), + request, + last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), + }; + + TaskTest::task_runs_and_fails_with(&mut task, "RequestError").await; + assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); + } + + #[tokio::test] + async fn request_non_notable_cosigns_failure() { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + + { + let mut txn = test.db.txn(); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let (request, calls) = TestRequest::new(true); + let mut task = CosignEvaluatorTask { + db: test.db.clone(), + request, + last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), + }; + + TaskTest::task_runs_and_fails_with(&mut task, "RequestError").await; + assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); } - let mut task = test.into_task(); - let _ = task.run_iteration().await; + #[tokio::test] + #[should_panic(expected = "candidate's start block number ")] + async fn panics_when_session_starts_after_block() { + let mut test = EvaluatorTest::default(); + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + + let info = + GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + let _ = task.run_iteration().await; + } + + #[tokio::test] + #[should_panic( + expected = "currently_evaluated_global_session_strict wasn't called incrementally" + )] + async fn panics_when_called_non_incrementally() { + let mut test = EvaluatorTest::default(); + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + let info = + GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &([1u8; 32], info)); + txn.commit(); + } + + { + let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + let info = + GlobalSession { start_block_number: 5, sets: vec![set], keys, stakes, total_stake: 1u64 }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &([2u8; 32], info)); + txn.commit(); + } + + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 10, has_events: HasEvents::No }); + txn.commit(); + } + + let mut task = test.into_task(); + let _ = task.run_iteration().await; + } + + #[tokio::test] + #[should_panic(expected = "attempt to add with overflow")] + async fn weight_overflow_notable() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let sets = vec![ + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, + ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, + ]; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + + let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), + ); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Ethereum, + &signed_cosign(global_session, ExternalNetworkId::Ethereum, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "weight_cosigned overflow").await; + } + + #[tokio::test] + #[should_panic(expected = "attempt to add with overflow")] + async fn weight_overflow_non_notable() { + let mut test = EvaluatorTest::default(); + + let global_session = { + let sets = vec![ + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, + ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, + ]; + + let mut keys = HashMap::new(); + keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + + let mut stakes = HashMap::new(); + stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); + stakes.insert(ExternalNetworkId::Ethereum, 1u64); + + let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + + let mut txn = test.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + txn.commit(); + + EvaluatorTest::GLOBAL_SESSION + }; + + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Bitcoin, + &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + ); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + ExternalNetworkId::Ethereum, + &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "weight_cosigned overflow").await; + } } diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 13c42662f..07bc3286b 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -1,23 +1,21 @@ -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc}; use rand_core::{OsRng, RngCore}; -use serai_db::{Db as _, DbTxn, MemDb}; +use serai_db::MemDb; -use serai_simulator_node::{SimulatorNode, SimulatorState}; +use serai_shim_rpc::SeraiShimRpc; use serai_client_serai::{ Serai, abi::{ Event, coins, primitives::{ - BlockHash, address::{ExternalAddress, SeraiAddress}, balance::{Amount, ExternalBalance}, coin::ExternalCoin, crypto::{ExternalKey, KeyPair, Public}, instructions::{OutInstruction, OutInstructionWithBalance}, - merkle::IncrementalUnbalancedMerkleTree, network_id::{ExternalNetworkId, NetworkId}, validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, }, @@ -27,8 +25,6 @@ use serai_client_serai::{ use crate::{intend::*, tests::*, *}; -use super::SERAI_NODE_LOCK; - fn set_decided_event(set: ValidatorSet, validators: Vec<(SeraiAddress, KeyShares)>) -> Event { Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) } @@ -62,8 +58,6 @@ fn burn_with_instruction_event(from: SeraiAddress) -> Event { }) } -/// Generic test struct for intend tests. -/// Uses `FakeSerai` for mock tests and can be extended for live tests. pub(crate) struct IntendTestStruct { pub(crate) serai: Arc, pub(crate) db: MemDb, @@ -77,95 +71,125 @@ impl IntoTask for IntendTestStruct { } } -impl IntendTestStruct { - fn assert_substrate_block_hash_exists(&self, block_number: u64) -> BlockHash { - let block_hash = SubstrateBlockHash::get(&self.db, block_number); - assert!(block_hash.is_some(), "no substrate blockhash for block {block_number}"); - block_hash.expect("no substrate blockhash") - } - - fn assert_builds_upon_is_expected(&self, expected: &IncrementalUnbalancedMerkleTree) { - assert_eq!(BuildsUpon::get(&self.db).as_ref(), Some(expected)); - } +/// Create a [`SeraiShimRpc`] and a [`IntendTestStruct`] connected to it. +async fn setup_mock_test() -> (SeraiShimRpc, IntendTestStruct) { + let (shim_serai, serai) = setup_shim_serai().await; + (shim_serai, IntendTestStruct { serai, db: MemDb::new() }) +} - fn assert_block_events_is_expected(&mut self, expected: BlockEventData) { - let mut txn = self.db.txn(); - let actual = BlockEvents::try_recv(&mut txn); - txn.commit(); - match actual { - Some(a) => { - assert_eq!(a.block_number, expected.block_number); - assert_eq!(a.has_events, expected.has_events); +/// Verify all post-run DB invariants by replaying events from the Serai node. +async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { + use serai_client_serai::abi::validator_sets::Event as VsEvent; + + let num_blocks_u64 = u64::try_from(num_blocks).unwrap(); + + // ScanCosignFrom should point to the block after the last processed + assert_eq!( + ScanCosignFrom::get(db), + Some(num_blocks_u64), + "ScanCosignFrom should be {num_blocks} after processing blocks 0..={n}", + n = num_blocks - 1 + ); + + // Replay events from the shim node to compute expected DB state. + let mut expected_stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64> = HashMap::new(); + let mut expected_latest_set: HashMap = HashMap::new(); + let mut decided_validators: HashMap> = HashMap::new(); + let mut next_session: HashMap = HashMap::new(); + let mut set_keys_count = 0usize; + + for block_num in 0 .. num_blocks_u64 { + let block = serai.block_by_number(block_num).await.unwrap().unwrap(); + let hash = block.header.hash(); + let events = serai.events(hash).await.unwrap(); + let vset = events.validator_sets(); + + for event in vset.allocation_events() { + let VsEvent::Allocation { validator, network, amount } = event else { continue }; + let Ok(net) = ExternalNetworkId::try_from(*network) else { continue }; + *expected_stakes.entry((net, *validator)).or_default() += amount.0; + } + for event in vset.deallocation_events() { + let VsEvent::Deallocation { validator, network, amount, .. } = event else { continue }; + let Ok(net) = ExternalNetworkId::try_from(*network) else { continue }; + *expected_stakes.entry((net, *validator)).or_default() -= amount.0; + } + for event in vset.set_decided_events() { + let VsEvent::SetDecided { set, validators } = event else { continue }; + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + decided_validators.insert(set, validators.iter().map(|(v, _)| *v).collect()); + } + for event in vset.set_keys_events() { + let VsEvent::SetKeys { set, .. } = event else { continue }; + let validators = decided_validators.get(set).cloned().unwrap_or_default(); + let mut total_stake = 0u64; + for v in &validators { + total_stake += expected_stakes.get(&(set.network, *v)).copied().unwrap_or(0); } - None => panic!("BlockEvents mismatch: got None, expected {:?}", expected), + if total_stake > 0 { + set_keys_count += 1; + expected_latest_set.insert(set.network, (set.session, total_stake)); + } + *next_session.entry(set.network).or_insert(0) += 1; } } - fn assert_scan_cosign_from_is_expected(&self, expected: u64) { - assert_eq!(ScanCosignFrom::get(&self.db), Some(expected)); + // Verify Stakes match the expected. + for (&(network, validator), &expected_amount) in &expected_stakes { + let db_stake = Stakes::get(db, network, validator); + assert_eq!( + db_stake, + Some(Amount(expected_amount)), + "stake mismatch for ({network:?}, {validator:?}): db={db_stake:?}, expected={expected_amount}" + ); } - fn assert_task_iteration_per_block_concluded( - &mut self, - block_number: u64, - has_events: HasEvents, - ) { - self.assert_block_events_is_expected(BlockEventData { block_number, has_events }); - self.assert_scan_cosign_from_is_expected(block_number + 1); + // Verify LatestSet matches the expected. + for (&network, &(session, stake)) in &expected_latest_set { + let latest = LatestSet::get(db, network); + assert!(latest.is_some(), "LatestSet should exist for {network:?}"); + let latest = latest.unwrap(); + assert_eq!(latest.session, session, "LatestSet session mismatch for {network:?}"); + assert_eq!(latest.stake.0, stake, "LatestSet stake mismatch for {network:?}"); } - /// Assert that the task processed `block_number` correctly (no events). - fn assert_task_iteration_per_block_with_no_events_ran( - &mut self, - block_number: u64, - expected_builds_upon: &IncrementalUnbalancedMerkleTree, - ) { - self.assert_substrate_block_hash_exists(block_number); - self.assert_builds_upon_is_expected(expected_builds_upon); - self.assert_task_iteration_per_block_concluded(block_number, HasEvents::No); + // No pending Validators entries (all should have been taken by SetKeys). + for (&network, _) in &expected_latest_set { + let session_num = next_session.get(&network).copied().unwrap_or(0); + if session_num > 0 { + let last_set = ExternalValidatorSet { network, session: Session(session_num - 1) }; + assert_eq!( + Validators::get(db, last_set), + None, + "Validators for {last_set:?} should have been consumed by SetKeys" + ); + } } - /// Assert blocks were processed up to (but not including) `failed_block`. - fn assert_task_iterations_with_no_events_failed_at( - &mut self, - failed_block: u64, - expected_builds_upon: &IncrementalUnbalancedMerkleTree, - ) { - self.assert_task_iteration_per_block_with_no_events_ran(failed_block - 1, expected_builds_upon); + // If any SetKeys happened, a GlobalSession should exist with consistent total_stake + if set_keys_count > 0 { + let session_id = LatestGlobalSessionIntended::get(db); + assert!( + session_id.is_some(), + "LatestGlobalSessionIntended should exist after {set_keys_count} SetKeys events", + ); + let session = GlobalSessions::get(db, session_id.unwrap()); + assert!(session.is_some(), "GlobalSession should exist"); + let session = session.unwrap(); + let sum: u64 = session.stakes.values().sum(); + assert_eq!( + session.total_stake, sum, + "GlobalSession total_stake should equal sum of individual stakes" + ); } -} -/// Create a [`SimulatorNode`] and an [`IntendTestStruct`] connected to it. -async fn setup_mock_test() -> (SimulatorNode, IntendTestStruct) { - let node = SimulatorNode::start(SimulatorState::default()).await; - let serai = Arc::new(Serai::new(node.url()).unwrap()); - (node, IntendTestStruct { serai, db: MemDb::new() }) -} - -#[tokio::test] -async fn iterates_serai_blocks() { - let _lock = SERAI_NODE_LOCK.lock().await; - serai_test_harness::serai_test(async |serai| { - let serai = Arc::new(serai); - let mut task = CosignIntendTask { db: MemDb::new(), serai: serai.clone() }; - - // First run processes all currently finalized blocks (and/or at least genesis), progress = true - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - // The task has now consumed everything up to latest_finalized. Record that height. - let height_after_first_run = serai.latest_finalized_block_number().await.unwrap(); - - // Second run: no new blocks beyond what was just processed, progress = false - TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - - // Wait for at least 3 new finalized blocks beyond the first run's height - let target = height_after_first_run + 3; - serai_test_harness::wait_for_blocks(&serai, target, Duration::from_secs(60)).await; - - // Third run: processes multiple new blocks, progress = true - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - }) - .await; + serai_log::log::info!( + "DB invariants verified: {} blocks, {} stake entries, {} LatestSets, {} SetKeys events", + num_blocks, + expected_stakes.len(), + expected_latest_set.len(), + set_keys_count + ); } mod errors { @@ -173,149 +197,100 @@ mod errors { #[tokio::test] async fn errors_if_chain_is_not_linear() { - let (node, mut test) = setup_mock_test().await; - - node.make_block(0, vec![]).await; - node.make_block(1, vec![]).await; + let (serai, task_test) = setup_mock_test().await; - let builds_upon_after_block_1 = node.builds_upon().await; - node.make_non_linear_block(2, vec![]).await; + serai.make_block(0, vec![]).await; + serai.make_block(1, vec![]).await; + serai.make_non_linear_block(2, vec![]).await; - let mut task = test.into_task(); + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "doesn't build upon").await; - TaskTest::assert_task_run_and_failed_with(&mut task, "doesn't build upon").await; + // Blocks 0,1 committed before the error on block 2 + assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - // Consume block 0's channel entry before checking block 1 - test.assert_block_events_is_expected(BlockEventData { - block_number: 0, - has_events: HasEvents::No, - }); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + // Fix the chain and re-run + serai.remove_block(2).await; + serai.make_block(2, vec![]).await; - // Now fix the chain: remove the broken block 2 and recreate it properly - node.remove_block(2).await; - node.make_block(2, vec![]).await; - - let mut task = test.into_task(); - - // Re-run the task, block 2 properly builds upon block 1 + let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - // block 1 was already asserted and cleared from queue, assert only block 2 now - let builds_upon_after_block_2 = node.builds_upon().await; - test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon_after_block_2); + verify_db_invariants(&task_test.db, &task_test.serai, 3).await; } #[tokio::test] async fn errors_if_block_not_found() { - let (node, mut test) = setup_mock_test().await; - - node.make_block(0, vec![]).await; - node.make_block(1, vec![]).await; + let (serai, task_test) = setup_mock_test().await; - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = node.builds_upon().await; + serai.make_block(0, vec![]).await; + serai.make_block(1, vec![]).await; + serai.make_block(2, vec![]).await; + serai.set_block_missing(2).await; - // Block 2 exists in terms of finalization, but returns None when fetched - node.make_block(2, vec![]).await; - node.set_block_missing(2).await; - - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with( + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with( &mut task, "couldn't get block which should've been finalized", ) .await; - test.assert_block_events_is_expected(BlockEventData { - block_number: 0, - has_events: HasEvents::No, - }); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); - - node.clear_block_missing(2).await; + assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - let mut task = test.into_task(); + serai.clear_block_missing(2).await; - // Re-run the task, block 2 now fetched and processed + let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - let builds_upon = node.builds_upon().await; - test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); + verify_db_invariants(&task_test.db, &task_test.serai, 3).await; } #[tokio::test] async fn handles_rpc_error_on_block_fetch() { - let (node, mut test) = setup_mock_test().await; - - node.make_block(0, vec![]).await; - node.make_block(1, vec![]).await; - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = node.builds_upon().await; + let (serai, task_test) = setup_mock_test().await; - // Block 2 exists in terms of finalization, but fetching it returns an error - node.make_block(2, vec![]).await; - node.set_block_number_error("blockchain/block", 2, "connection refused").await; + serai.make_block(0, vec![]).await; + serai.make_block(1, vec![]).await; + serai.make_block(2, vec![]).await; + serai.set_block_number_error("blockchain/block", 2, "connection refused").await; - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching block").await; + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching block").await; - test.assert_block_events_is_expected(BlockEventData { - block_number: 0, - has_events: HasEvents::No, - }); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - node.clear_block_number_error("blockchain/block", 2).await; + serai.clear_block_number_error("blockchain/block", 2).await; - let mut task = test.into_task(); - - // Re-run the task, block 2 now fetched and processed + let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - let builds_upon = node.builds_upon().await; - test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); + verify_db_invariants(&task_test.db, &task_test.serai, 3).await; } #[tokio::test] async fn handles_rpc_error_on_events_fetch() { - let (node, mut test) = setup_mock_test().await; - - node.make_block(0, vec![]).await; - node.make_block(1, vec![]).await; - - // Capture builds_upon after block 1 - let builds_upon_after_block_1 = node.builds_upon().await; - - // Block 2 exists in terms of finalization, but fetching its events returns an error - let block2_hash = node.make_block(2, vec![]).await; - node.set_block_hash_error("blockchain/events", block2_hash, "timeout").await; + let (serai, task_test) = setup_mock_test().await; - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching events").await; + serai.make_block(0, vec![]).await; + serai.make_block(1, vec![]).await; + let block2_hash = serai.make_block(2, vec![]).await; + serai.set_block_hash_error("blockchain/events", block2_hash, "timeout").await; - test.assert_block_events_is_expected(BlockEventData { - block_number: 0, - has_events: HasEvents::No, - }); - test.assert_task_iterations_with_no_events_failed_at(2, &builds_upon_after_block_1); + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching events").await; - node.clear_block_hash_error("blockchain/events", block2_hash).await; + assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - let mut task = test.into_task(); + serai.clear_block_hash_error("blockchain/events", block2_hash).await; - // Re-run the task, block 2 events now fetched and processed + let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - let builds_upon = node.builds_upon().await; - test.assert_task_iteration_per_block_with_no_events_ran(2, &builds_upon); + verify_db_invariants(&task_test.db, &task_test.serai, 3).await; } #[tokio::test] async fn errors_if_set_decided_has_empty_validators() { - let (node, test) = setup_mock_test().await; + let (serai, task_test) = setup_mock_test().await; - // Block 0: no events - node.make_block(0, vec![]).await; + serai.make_block(0, vec![]).await; - // Block 1: SetDecided with an external network but an empty validator list let empty_set_decided = set_decided_event( ValidatorSet { network: NetworkId::External(ExternalNetworkId::Bitcoin), @@ -323,38 +298,301 @@ mod errors { }, vec![], ); - node.make_block(1, vec![vec![empty_set_decided]]).await; + serai.make_block(1, vec![vec![empty_set_decided]]).await; - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with( - &mut task, - "validator set from Event::SetDecided was empty", - ) - .await; + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "validator set from Event::SetDecided was empty") + .await; + + // Block 0 committed, block 1 failed mid-processing + assert_eq!(ScanCosignFrom::get(&task_test.db), Some(1)); } #[tokio::test] async fn handles_rpc_error_on_latest_finalized() { - let (node, mut test) = setup_mock_test().await; + let (serai, task_test) = setup_mock_test().await; - node.make_block(0, vec![]).await; - node.make_block(1, vec![]).await; - node.set_error("blockchain/latest_finalized_block_number", "network error").await; + serai.make_block(0, vec![]).await; + serai.make_block(1, vec![]).await; + serai.set_error("blockchain/latest_finalized_block_number", "network error").await; - let mut task = test.into_task(); - TaskTest::assert_task_run_and_failed_with(&mut task, "RPC error fetching latest finalized") - .await; + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching latest finalized").await; - node.clear_error("blockchain/latest_finalized_block_number").await; + // No blocks processed — error happened before scanning + assert_eq!(ScanCosignFrom::get(&task_test.db), None); - let mut task = test.into_task(); + serai.clear_error("blockchain/latest_finalized_block_number").await; + let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - test.assert_block_events_is_expected(BlockEventData { - block_number: 0, - has_events: HasEvents::No, - }); - let builds_upon = node.builds_upon().await; - test.assert_task_iteration_per_block_with_no_events_ran(1, &builds_upon); + verify_db_invariants(&task_test.db, &task_test.serai, 2).await; } } + +/// Random event, state, and block generator. +struct EventFuzzer { + /// Monotonic counter hashed with blake2 for deterministic pseudo-random bytes. + counter: u64, + /// Seed bytes. + seed: [u8; 32], + /// Available validator addresses. + validators: Vec, + /// All networks. + networks: Vec, + /// Running stake ledger: `(network, validator) -> accumulated_stake`. + stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64>, + /// Sets that have been decided but not yet keyed. + pending_keys: HashMap>, + /// Next session number per network. + next_session: HashMap, +} + +impl EventFuzzer { + fn new() -> Self { + let mut seed = [0u8; 32]; + OsRng.fill_bytes(&mut seed); + + let num_validators = usize::try_from((OsRng.next_u64() % 17) + 4).unwrap(); + + let validators: Vec = (0 .. num_validators) + .map(|i| { + let mut bytes = [0u8; 32]; + bytes[0 .. 8].copy_from_slice(&u64::try_from(i).unwrap().to_le_bytes()); + bytes[8 .. 16].copy_from_slice(&seed[0 .. 8]); + SeraiAddress(bytes) + }) + .collect(); + + let networks: Vec = NetworkId::all().collect(); + + Self { + counter: 0, + seed, + validators, + networks, + stakes: HashMap::new(), + pending_keys: HashMap::new(), + next_session: HashMap::new(), + } + } + + /// Generate a pseudo-random `u64` by hashing counter + seed with blake2. + fn next_u64(&mut self) -> u64 { + use blake2::{Blake2b256, Digest as _}; + let hash = + Blake2b256::new().chain_update(self.seed).chain_update(self.counter.to_le_bytes()).finalize(); + self.counter += 1; + u64::from_le_bytes(hash[0 .. 8].try_into().unwrap()) + } + + /// Pick a random element from a slice. + fn pick<'a, T>(&mut self, slice: &'a [T]) -> &'a T { + let idx = self.next_u64() % u64::try_from(slice.len()).unwrap(); + &slice[usize::try_from(idx).unwrap()] + } + + /// Generate a random allocation event. + fn random_allocation(&mut self) -> Event { + let validator = *self.pick(&self.validators.clone()); + let network = *self.pick(&self.networks.clone()); + let amount = (self.next_u64() % 10000) + 1; // 1..=10000 + if let Ok(ext) = ExternalNetworkId::try_from(network) { + *self.stakes.entry((ext, validator)).or_default() += amount; + } + allocation_event(validator, network, amount) + } + + /// Generate a random deallocation event. Returns `None` if no validator has stake. + fn random_deallocation(&mut self) -> Option { + // ~25% chance of generating a Serai deallocation (exercises the `continue` branch) + if self.next_u64() % 4 == 0 { + let validator = *self.pick(&self.validators.clone()); + let amount = (self.next_u64() % 100) + 1; + return Some(deallocation_event(validator, NetworkId::Serai, amount)); + } + + let candidates: Vec<((ExternalNetworkId, SeraiAddress), u64)> = + self.stakes.iter().filter(|(_, &s)| s > 0).map(|(&k, &v)| (k, v)).collect(); + if candidates.is_empty() { + return None; + } + let idx = self.next_u64() % u64::try_from(candidates.len()).unwrap(); + let ((network, validator), current_stake) = candidates[usize::try_from(idx).unwrap()]; + let amount = (self.next_u64() % current_stake) + 1; // 1..=current_stake + *self.stakes.entry((network, validator)).or_default() -= amount; + Some(deallocation_event(validator, NetworkId::External(network), amount)) + } + + /// Generate a random SetDecided event. + /// + /// SetDecided only applies to external networks (Serai sessions are managed by the runtime). + fn random_set_decided(&mut self) -> Option { + let external_networks: Vec = + self.networks.iter().copied().filter_map(|n| ExternalNetworkId::try_from(n).ok()).collect(); + let network = *self.pick(&external_networks); + let session_num = *self.next_session.entry(network).or_insert(0); + let set = ExternalValidatorSet { network, session: Session(session_num) }; + + // Don't double-decide a set that's already pending keys + if self.pending_keys.contains_key(&set) { + return None; + } + + // Pick 1..=min(3, validators.len()) random validators for this set + let max_count = self.validators.len().min(3); + let count = usize::try_from((self.next_u64() % u64::try_from(max_count).unwrap()) + 1).unwrap(); + + // Shuffle-pick by swapping from a clone + let mut pool = self.validators.clone(); + let mut chosen = Vec::with_capacity(count); + for _ in 0 .. count { + let idx = usize::try_from(self.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap(); + chosen.push(pool.swap_remove(idx)); + } + + self.pending_keys.insert(set, chosen.clone()); + + let validators_with_shares: Vec<(SeraiAddress, KeyShares)> = + chosen.into_iter().map(|v| (v, KeyShares::ONE)).collect(); + + Some(set_decided_event( + ValidatorSet { network: NetworkId::External(network), session: Session(session_num) }, + validators_with_shares, + )) + } + + /// Generate a random SetKeys event for a pending (decided but not yet keyed) set. + fn random_set_keys(&mut self) -> Option { + if self.pending_keys.is_empty() { + return None; + } + + let keys: Vec = self.pending_keys.keys().copied().collect(); + let idx = usize::try_from(self.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); + let set = keys[idx]; + // Remove from pending — the task will Validators::take it + self.pending_keys.remove(&set); + + // Advance session for this network so the next SetDecided gets session+1 + *self.next_session.entry(set.network).or_insert(0) += 1; + + let mut public = Public([0u8; 32]); + public.0[0 .. 8].copy_from_slice(&self.next_u64().to_le_bytes()); + let external_key = ExternalKey(vec![1u8].try_into().unwrap()); + let key_pair = KeyPair(public, external_key); + + Some(Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair })) + } + + /// Generate a random BurnWithInstruction event. + fn random_burn(&mut self) -> Event { + let mut burn_address = SeraiAddress([0u8; 32]); + burn_address.0[0 .. 8].copy_from_slice(&self.next_u64().to_le_bytes()); + burn_with_instruction_event(burn_address) + } + + /// Generate random events for a single block. + fn generate_block_events(&mut self) -> Vec> { + let num_events = self.next_u64() % 8; // 0..=7 events per block + if num_events == 0 { + return vec![]; + } + + let mut alloc_count = 0u64; + let mut dealloc_count = 0u64; + let mut set_decided_count = 0u64; + let mut set_keys_count = 0u64; + let mut burn_count = 0u64; + + for _ in 0 .. num_events { + match self.next_u64() % 100 { + 0 ..= 35 => alloc_count += 1, + 36 ..= 55 => dealloc_count += 1, + 56 ..= 70 => set_decided_count += 1, + 71 ..= 85 => set_keys_count += 1, + 86 ..= 99 => burn_count += 1, + _ => unreachable!(), + } + } + + let mut events = Vec::new(); + + // Update the stakes + for _ in 0 .. alloc_count { + events.push(self.random_allocation()); + } + for _ in 0 .. dealloc_count { + if let Some(e) = self.random_deallocation() { + events.push(e); + } + } + + // Handle decided sets + for _ in 0 .. set_decided_count { + if let Some(e) = self.random_set_decided() { + events.push(e); + } + } + + // Handle declarations of the latest set + for _ in 0 .. set_keys_count { + if let Some(event) = self.random_set_keys() { + events.push(event); + } + } + + // Handle burn with instruction events (makes block non-notable if not already notable) + for _ in 0 .. burn_count { + events.push(self.random_burn()); + } + + // Shuffle the events to test order-independence + for i in (1 .. events.len()).rev() { + let j = usize::try_from(self.next_u64() % u64::try_from(i + 1).unwrap()).unwrap(); + events.swap(i, j); + } + + if events.is_empty() { + vec![] + } else { + vec![events] + } + } + + /// Generate multiple blocks of random events. + fn generate_blocks(&mut self, count: usize) -> Vec>> { + let mut blocks = Vec::with_capacity(count); + for _ in 0 .. count { + blocks.push(self.generate_block_events()); + } + blocks + } +} + +#[tokio::test] +async fn fuzzed_event_processing() { + let _ = env_logger::try_init(); + + let num_blocks = 1000; + + let mut fuzzer = EventFuzzer::new(); + let blocks = fuzzer.generate_blocks(num_blocks); + + serai_log::log::info!( + "Fuzz test: {} blocks, {} validators, seed={:?}", + num_blocks, + fuzzer.validators.len(), + hex::encode(fuzzer.seed) + ); + + let (serai, task_test) = setup_mock_test().await; + for (i, events) in blocks.into_iter().enumerate() { + serai.make_block(u64::try_from(i).unwrap(), events).await; + } + + let mut task = task_test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + + verify_db_invariants(&task_test.db, &task_test.serai, num_blocks).await; +} diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 6cbfdd327..e306f4e54 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -17,95 +17,12 @@ use std::{ }, }; +use serai_shim_rpc::{SeraiShimRpc, ShimState}; +use serai_client_serai::Serai; pub(crate) use serai_test_task::{IntoTask, TaskTest}; -pub(crate) static SERAI_NODE_LOCK: std::sync::LazyLock> = - std::sync::LazyLock::new(|| tokio::sync::Mutex::new(())); - use crate::RequestNotableCosigns; -/// Waits until a condition is met, with a timeout. -/// -/// Polls the condition at `interval` and panics if `timeout` is exceeded. -/// -/// # Examples -/// ```ignore -/// // Simple condition (no value printed on timeout) -/// wait_until!(some_condition()); -/// -/// // With comparison - prints actual value on timeout -/// wait_until!(LatestCosignedBlockNumber::get(&db) => Some(3)); -/// -/// // With custom timeout -/// wait_until!(value_expr => expected, Duration::from_secs(30)); -/// ``` -#[allow(unused_macro_rules)] -macro_rules! wait_until { - // Simple condition without value printing - ($condition:expr) => { - wait_until!(@simple $condition, Duration::from_secs(60), Duration::from_millis(10)) - }; - ($condition:expr, $timeout:expr) => { - wait_until!(@simple $condition, $timeout, Duration::from_millis(10)) - }; - ($condition:expr, $timeout:expr, $interval:expr) => { - wait_until!(@simple $condition, $timeout, $interval) - }; - // Comparison form: wait_until!(actual_expr => expected_value) - // Prints actual value on timeout - ($actual:expr => $expected:expr) => { - wait_until!(@compare $actual, $expected, Duration::from_secs(60), Duration::from_millis(10)) - }; - ($actual:expr => $expected:expr, $timeout:expr) => { - wait_until!(@compare $actual, $expected, $timeout, Duration::from_millis(10)) - }; - ($actual:expr => $expected:expr, $timeout:expr, $interval:expr) => { - wait_until!(@compare $actual, $expected, $timeout, $interval) - }; - // Internal: simple condition - (@simple $condition:expr, $timeout:expr, $interval:expr) => { - tokio::select! { - _ = async { - loop { - if $condition { - break; - } - tokio::time::sleep($interval).await; - } - } => {} - _ = tokio::time::sleep($timeout) => { - panic!("timeout waiting for condition: {}", stringify!($condition)); - } - } - }; - // Internal: comparison with value printing - (@compare $actual:expr, $expected:expr, $timeout:expr, $interval:expr) => {{ - let expected = $expected; - let mut last_actual = None; - tokio::select! { - _ = async { - loop { - let actual = $actual; - if actual == expected { - break; - } - last_actual = Some(actual); - tokio::time::sleep($interval).await; - } - } => {} - _ = tokio::time::sleep($timeout) => { - panic!( - "timeout waiting for {} to equal {:?}, last value was {:?}", - stringify!($actual), - expected, - last_actual - ); - } - } - }}; -} -pub(crate) use wait_until; - #[derive(Clone)] pub(crate) struct TestRequest { pub(crate) calls: Arc, @@ -141,3 +58,10 @@ impl RequestNotableCosigns for TestRequest { } } } + +/// Create a [`SeraiShimRpc`] and a [`Arc`] to use it. +async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { + let shim_serai = SeraiShimRpc::start(ShimState::default()).await; + let serai = Arc::new(Serai::new(shim_serai.url()).unwrap()); + (shim_serai, serai) +} diff --git a/coordinator/cosign/types/Cargo.toml b/coordinator/cosign/types/Cargo.toml index 1285c9c48..4ef4f3557 100644 --- a/coordinator/cosign/types/Cargo.toml +++ b/coordinator/cosign/types/Cargo.toml @@ -18,7 +18,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [features] -default = [] test-helpers = [] [dependencies] diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 8a34de432..5ce65d3a3 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -3,18 +3,16 @@ use crate::{COSIGN_CONTEXT, Cosign, SignedCosign}; #[cfg(test)] use crate::{BlockHash, CosignIntent, ExternalNetworkId, Public}; -fn sr25519_fixture() -> schnorrkel::Keypair { - schnorrkel::MiniSecretKey::from_bytes(&[0xff; 32]) - .expect("fixed seed should be valid") - .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) -} - fn sr25519_fixture_from_seed(seed: [u8; 32]) -> schnorrkel::Keypair { schnorrkel::MiniSecretKey::from_bytes(&seed) .expect("seed should be valid") .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) } +fn sr25519_fixture() -> schnorrkel::Keypair { + sr25519_fixture_from_seed([0xff; 32]) +} + fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { SignedCosign { cosign: cosign.clone(), @@ -43,7 +41,7 @@ pub fn sign_cosign_with_seed(cosign: Cosign, seed: [u8; 32]) -> SignedCosign { } #[test] -fn cosign_intent_to_cosign() { +fn cosign_intent_into_cosign() { let intent = CosignIntent { global_session: [1u8; 32], block_number: 5, @@ -60,7 +58,7 @@ fn cosign_intent_to_cosign() { } #[test] -fn cosign_signature_message() { +fn deterministic_signature_message() { let cosign = Cosign { global_session: [1u8; 32], block_number: 5, @@ -93,9 +91,6 @@ fn signed_cosign_verify_signature_valid() { #[test] fn signed_cosign_verify_signature_invalid() { let keypair1 = sr25519_fixture(); - let keypair2 = schnorrkel::MiniSecretKey::from_bytes(&[0x01; 32]) - .unwrap() - .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519); let cosign = Cosign { global_session: [1u8; 32], @@ -105,7 +100,7 @@ fn signed_cosign_verify_signature_invalid() { }; let signed = sign_cosign(cosign, &keypair1); - let wrong_pubkey = Public(keypair2.public.to_bytes()); + let wrong_pubkey = public_key_from_seed([0x01; 32]); assert!(!signed.verify_signature(wrong_pubkey), "invalid signature should not verify"); } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index fd70fceec..3a1bbc2d7 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -131,8 +131,8 @@ fn spawn_cosigning( } } - let time_till_cosign_rebroadcast = (last_cosign_rebroadcast - + serai_cosign::BROADCAST_FREQUENCY) + let time_till_cosign_rebroadcast = (last_cosign_rebroadcast + + serai_cosign::BROADCAST_FREQUENCY) .saturating_duration_since(Instant::now()); tokio::select! { () = tokio::time::sleep(time_till_cosign_rebroadcast) => { @@ -379,8 +379,8 @@ async fn main() { // Remove retired Tributaries from ActiveTributaries let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]); active_tributaries.retain(|tributary| { - RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) - < Some(tributary.set.session.0) + RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) < + Some(tributary.set.session.0) }); ActiveTributaries::set(&mut txn, &active_tributaries); @@ -405,8 +405,8 @@ async fn main() { let mut key_bytes = serai_key.to_bytes(); // Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces let mut expanded_key = Zeroizing::new([0; 64]); - expanded_key.as_mut_slice()[..32].copy_from_slice(&key_bytes); - OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32..]); + expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes); + OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]); key_bytes.zeroize(); Zeroizing::new( schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(), diff --git a/substrate/abi/src/modules/validator_sets.rs b/substrate/abi/src/modules/validator_sets.rs index fc5f8ef81..4cf2f02f0 100644 --- a/substrate/abi/src/modules/validator_sets.rs +++ b/substrate/abi/src/modules/validator_sets.rs @@ -12,6 +12,8 @@ use serai_primitives::{ validator_sets::*, }; +pub use serai_primitives::validator_sets::DeallocationTimeline; + /// The address used by the validator sets pallet. pub fn address() -> SeraiAddress { SeraiAddress::system(borsh::to_vec(b"ValidatorSets").unwrap()) diff --git a/substrate/client/serai/src/lib.rs b/substrate/client/serai/src/lib.rs index f507554cc..c99a1fb74 100644 --- a/substrate/client/serai/src/lib.rs +++ b/substrate/client/serai/src/lib.rs @@ -171,13 +171,13 @@ impl Serai { self .call( "blockchain/publish_transaction", - &format!(r#"{{ "transaction": "{}" }}"#, hex::encode(borsh::to_vec(transaction).unwrap())), + &format!(r#"{{ "transaction": {} }}"#, hex::encode(borsh::to_vec(transaction).unwrap())), ) .await } /// Fetch the events of a specific block. - pub async fn events(&self, block: &BlockHash) -> Result { + pub async fn events(&self, block: BlockHash) -> Result { Ok(Events { events: Arc::new( self diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 72b36d2d0..12b56fe17 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -75,7 +75,7 @@ impl From for BlockNumber { level so this is fine for our use-case. If we do ever see a 64-byte block hash, we can simply hash it into a 32-byte hash or truncate it. */ -#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)] pub struct BlockHash(pub [u8; 32]); #[cfg(feature = "scale")] crate::borsh_as_scale!(BlockHash); diff --git a/tests/shim-rpc/Cargo.toml b/tests/shim-rpc/Cargo.toml new file mode 100644 index 000000000..871605015 --- /dev/null +++ b/tests/shim-rpc/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "serai-shim-rpc" +version = "0.1.0" +description = "A bespoke shim RPC node for testing Serai RPC clients without a real chain" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/tests/shim-rpc" +authors = ["Luke Parker ", "rafael_xmr "] +edition = "2021" +rust-version = "1.85" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +jsonrpsee = { version = "0.24", default-features = false, features = ["server"] } +tokio = { version = "1", default-features = false } + +serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] } + +borsh = { version = "1", default-features = false, features = ["std", "derive"] } +hex = { version = "0.4", default-features = false, features = ["std"] } +blake2 = { version = "0.11.0-rc.0", default-features = false } +serde = { version = "1", default-features = false } +sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk" } + +[dev-dependencies] +serai-client-serai = { path = "../../substrate/client/serai" } diff --git a/tests/shim-rpc/LICENSE b/tests/shim-rpc/LICENSE new file mode 100644 index 000000000..f995f1e78 --- /dev/null +++ b/tests/shim-rpc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Serai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tests/shim-rpc/README.md b/tests/shim-rpc/README.md new file mode 100644 index 000000000..a9dd06ff2 --- /dev/null +++ b/tests/shim-rpc/README.md @@ -0,0 +1,27 @@ +# serai-shim-rpc + +In-process **shim RPC node**, wire-compatible with the production `Serai` RPC serai (`serai-client-serai`). + +- Binds to an ephemeral `127.0.0.1` port and implements some RPC methods the real node exposes (`blockchain/*`, `validator-sets/*`, ...); +- Lets tests **pre-populate blocks with arbitrary events** (allocations, SetDecided, SetKeys, etc.) so the exact scenario is fully controlled; +- Supports **dynamic block addition** (`add_block_with_events`) and **error injection** (`set_error` / `clear_error`) during a test; +- Builds blocks using the same `IncrementalUnbalancedMerkleTree` + `BLOCK_BRANCH_TAG`/`BLOCK_LEAF_TAG` logic as the real chain, so `builds_upon` hashes are valid. + +## Example + +```rust +use serai_shim_rpc::{SeraiShimRpc, ShimState}; +use serai_client_serai::Serai; + +let mut state = ShimState::default(); +state.make_block(0, vec![vec![]]); +state.make_block(1, vec![vec![allocation_event(...), set_keys_event(...)]]); + +let shim_serai = SeraiShimRpc::start(state).await; +let serai = Serai::new(shim_serai.url()).unwrap(); + +let latest = serai.latest_finalized_block_number().await.unwrap(); +assert_eq!(latest, 1); + +shim_serai.stop(); +``` diff --git a/tests/shim-rpc/src/builder.rs b/tests/shim-rpc/src/builder.rs new file mode 100644 index 000000000..1ea285957 --- /dev/null +++ b/tests/shim-rpc/src/builder.rs @@ -0,0 +1,46 @@ +use serai_abi::Event; + +use crate::{SeraiShimRpc, state::ShimState}; + +/// Builder for constructing a [`SeraiShimRpc`] with pre-populated blocks. +#[must_use] +pub struct SeraiShimRpcBuilder { + blocks: Vec>>, +} + +impl SeraiShimRpcBuilder { + /// Create a new builder. + pub fn new() -> Self { + Self { blocks: Vec::new() } + } + + /// Add a single block with the given events (one `Vec` per transaction). + pub fn with_block(mut self, events: Vec>) -> Self { + self.blocks.push(events); + self + } + + /// Add multiple blocks, each with their own events. + pub fn with_blocks(mut self, blocks: Vec>>) -> Self { + self.blocks.extend(blocks); + self + } + + /// Build and start the shim RPC node. + pub async fn build(self) -> SeraiShimRpc { + let mut sim_state = ShimState::default(); + for (i, events) in self.blocks.into_iter().enumerate() { + #[expect(clippy::as_conversions)] + let number = (i as u64) + 1; + sim_state.make_block(number, events); + } + + SeraiShimRpc::start(sim_state).await + } +} + +impl Default for SeraiShimRpcBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs new file mode 100644 index 000000000..82f803f05 --- /dev/null +++ b/tests/shim-rpc/src/lib.rs @@ -0,0 +1,146 @@ +#![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] + +pub mod state; +pub mod rpc; +pub mod builder; + +pub use state::*; +pub use builder::SeraiShimRpcBuilder; + +use std::net::SocketAddr; + +use jsonrpsee::server::ServerHandle; +use serai_abi::{ + primitives::{BlockHash, merkle::IncrementalUnbalancedMerkleTree}, + Event, +}; + +/// A bespoke shim RPC node that speaks JSON-RPC 2.0 over HTTP, +/// wire-compatible with the production `Serai` client. +pub struct SeraiShimRpc { + url: String, + state: SharedState, + handle: ServerHandle, +} + +impl SeraiShimRpc { + /// Create a builder for configuring and starting a shim RPC node. + pub fn builder() -> SeraiShimRpcBuilder { + SeraiShimRpcBuilder::new() + } + + /// Start a shim RPC node with the given initial state, binding to an ephemeral port. + pub async fn start(initial_state: ShimState) -> Self { + let state = std::sync::Arc::new(tokio::sync::RwLock::new(initial_state)); + let rpc_module = rpc::build_rpc_module(state.clone()).expect("failed to build RPC module"); + + let server = jsonrpsee::server::ServerBuilder::default() + .build(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .expect("failed to bind shim RPC node server"); + + let addr = server.local_addr().expect("server should have a local address"); + let handle = server.start(rpc_module); + + Self { url: format!("http://{addr}"), state, handle } + } + + /// The HTTP URL this shim is listening on. + pub fn url(&self) -> String { + self.url.clone() + } + + /// Create a block at the given number with events. + /// Returns the hash of the newly created block. + pub async fn make_block(&self, number: u64, events: Vec>) -> BlockHash { + self.state.write().await.make_block(number, events) + } + + /// Add a block with events dynamically (during a test). + /// The block number is automatically determined as the next sequential block. + /// Returns the hash of the newly created block. + pub async fn add_block_with_events(&self, events: Vec>) -> BlockHash { + let mut state = self.state.write().await; + let number = state.latest_finalized_block_number() + 1; + state.make_block(number, events) + } + + /// Get the shim's current `builds_upon` merkle tree state. + pub async fn builds_upon(&self) -> IncrementalUnbalancedMerkleTree { + self.state.read().await.builds_upon.clone() + } + + /// Inject an error for a specific RPC method. Any call to this method will fail. + pub async fn set_error(&self, method: &str, message: &str) { + let mut state = self.state.write().await; + state.errors.method_errors.insert(method.to_owned(), message.to_owned()); + } + + /// Clear an injected error for a specific RPC method. + pub async fn clear_error(&self, method: &str) { + let mut state = self.state.write().await; + state.errors.method_errors.remove(method); + } + + /// Clear all injected errors. + pub async fn clear_all_errors(&self) { + let mut state = self.state.write().await; + state.errors = ErrorInjection::default(); + } + + /// Inject an error for a specific RPC method + block number combination. + pub async fn set_block_number_error(&self, method: &str, number: u64, message: &str) { + let mut state = self.state.write().await; + state.errors.block_number_errors.insert((method.to_owned(), number), message.to_owned()); + } + + /// Clear an injected error for a specific RPC method + block number. + pub async fn clear_block_number_error(&self, method: &str, number: u64) { + let mut state = self.state.write().await; + state.errors.block_number_errors.remove(&(method.to_owned(), number)); + } + + /// Inject an error for a specific RPC method + block hash combination. + pub async fn set_block_hash_error(&self, method: &str, hash: BlockHash, message: &str) { + let mut state = self.state.write().await; + state.errors.block_hash_errors.insert((method.to_owned(), hash), message.to_owned()); + } + + /// Clear an injected error for a specific RPC method + block hash. + pub async fn clear_block_hash_error(&self, method: &str, hash: BlockHash) { + let mut state = self.state.write().await; + state.errors.block_hash_errors.remove(&(method.to_owned(), hash)); + } + + /// Hide a block so that `blockchain/block` returns `None` for it. + pub async fn set_block_missing(&self, number: u64) { + self.state.write().await.missing_blocks.insert(number); + } + + /// Un-hide a previously hidden block. + pub async fn clear_block_missing(&self, number: u64) { + self.state.write().await.missing_blocks.remove(&number); + } + + /// Remove a block (and its associated events) from the shim state. + pub async fn remove_block(&self, number: u64) { + let mut state = self.state.write().await; + state.remove_block(number); + } + + /// Create a non-linear block (wrong `builds_upon`) without advancing the chain state. + pub async fn make_non_linear_block(&self, number: u64, events: Vec>) -> BlockHash { + let mut state = self.state.write().await; + state.make_non_linear_block(number, events) + } + + /// Access the underlying shared state directly. + pub fn state(&self) -> &SharedState { + &self.state + } + + /// Stop the shim RPC node server. + pub fn stop(&self) { + self.handle.stop().expect("failed to stop shim RPC node"); + } +} diff --git a/tests/shim-rpc/src/rpc.rs b/tests/shim-rpc/src/rpc.rs new file mode 100644 index 000000000..54913f80a --- /dev/null +++ b/tests/shim-rpc/src/rpc.rs @@ -0,0 +1,265 @@ +use jsonrpsee::{types::error::ErrorObjectOwned, RpcModule}; + +use serai_abi::{ + Event, + primitives::{ + BlockHash, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{ExternalValidatorSet, Session}, + }, +}; + +use crate::state::SharedState; + +/// Typed RPC errors mirroring `substrate/node/src/rpc/utils.rs`. +enum Error { + Internal(String), + InvalidRequest(String), + InvalidStateReference, + #[allow(dead_code)] + InvalidTransaction(String), +} + +impl From for ErrorObjectOwned { + fn from(error: Error) -> Self { + match error { + Error::Internal(msg) => ErrorObjectOwned::owned(-1, msg, Option::<()>::None), + Error::InvalidRequest(msg) => ErrorObjectOwned::owned(-2, msg, Option::<()>::None), + Error::InvalidStateReference => ErrorObjectOwned::owned( + -3, + "the block used as the reference was not locally held", + Option::<()>::None, + ), + Error::InvalidTransaction(msg) => ErrorObjectOwned::owned( + -4, + format!("transaction was not accepted to the mempool: {msg}"), + Option::<()>::None, + ), + } + } +} + +/// Resolve a block hash from JSON-RPC params. +/// +/// Mirrors `substrate/node/src/rpc/utils.rs`: +/// - `{ "block": "hex_hash" }` → lookup by hash +/// - `{ "block": 123 }` → lookup by number +fn resolve_block_hash( + params: &jsonrpsee::types::params::Params, + state: &crate::state::ShimState, +) -> Result, Error> { + #[derive(sp_core::serde::Deserialize)] + struct BlockByHash { + block: String, + } + #[derive(sp_core::serde::Deserialize)] + struct BlockByNumber { + block: u64, + } + + if let Ok(by_hash) = params.parse::() { + let Some(hash_bytes) = + hex::decode(&by_hash.block).ok().and_then(|bytes| <[u8; 32]>::try_from(bytes).ok()) + else { + return Err(Error::InvalidRequest("requested block hash wasn't a valid hash".to_owned())); + }; + let block_hash = BlockHash(hash_bytes); + if state.block_number_by_hash.contains_key(&block_hash) { + Ok(Some(block_hash)) + } else { + Ok(None) + } + } else if let Ok(by_number) = params.parse::() { + Ok(state.block_hash_by_number(by_number.block)) + } else { + Err(Error::InvalidRequest("requested block wasn't a valid hash nor number".to_owned())) + } +} + +fn network_from_str(network: &str) -> Result { + Ok(match network.to_lowercase().as_str() { + "serai" => NetworkId::Serai, + "bitcoin" => NetworkId::External(ExternalNetworkId::Bitcoin), + "ethereum" => NetworkId::External(ExternalNetworkId::Ethereum), + "monero" => NetworkId::External(ExternalNetworkId::Monero), + _ => return Err(Error::InvalidRequest("unrecognized network requested".to_owned())), + }) +} + +fn parse_network(params: &jsonrpsee::types::params::Params) -> Result { + #[derive(sp_core::serde::Deserialize)] + struct Network { + network: String, + } + let network: Network = + params.parse().map_err(|_| Error::InvalidRequest(r#"missing "network" field"#.to_owned()))?; + network_from_str(&network.network) +} + +fn parse_set(params: &jsonrpsee::types::params::Params) -> Result { + #[derive(sp_core::serde::Deserialize)] + struct Set { + network: String, + session: u32, + } + let set: Set = params + .parse() + .map_err(|_| Error::InvalidRequest(r#"missing "network"/"session" fields"#.to_owned()))?; + let network = network_from_str(&set.network)?; + ExternalValidatorSet::try_from(serai_abi::primitives::validator_sets::ValidatorSet { + network, + session: Session(set.session), + }) + .map_err(|()| Error::InvalidRequest("requested keys for a non-external validator set".to_owned())) +} + +/// Build the RPC module with all method handlers matching the real Serai node. +pub fn build_rpc_module(state: SharedState) -> Result, ErrorObjectOwned> { + let mut module = RpcModule::new(state); + + module + .register_async_method( + "blockchain/latest_finalized_block_number", + async |_params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("blockchain/latest_finalized_block_number") { + return Err(Error::Internal(err.to_owned())); + } + Ok(state.latest_finalized_block_number()) + }, + ) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("blockchain/is_finalized", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("blockchain/is_finalized") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Ok(false); + }; + if let Some(err) = state.errors.check_block_hash("blockchain/is_finalized", &block_hash) { + return Err(Error::Internal(err.to_owned())); + } + Ok(state.block_number_by_hash.contains_key(&block_hash)) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("blockchain/block", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("blockchain/block") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Ok(None); + }; + if let Some(err) = state.errors.check_block_hash("blockchain/block", &block_hash) { + return Err(Error::Internal(err.to_owned())); + } + let Some(&number) = state.block_number_by_hash.get(&block_hash) else { + return Ok(None); + }; + if let Some(err) = state.errors.check_block_number("blockchain/block", number) { + return Err(Error::Internal(err.to_owned())); + } + if state.missing_blocks.contains(&number) { + return Ok(None); + } + let Some(block) = state.blocks_by_number.get(&number) else { + return Ok(None); + }; + Ok(Some(hex::encode(borsh::to_vec(block).unwrap()))) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("blockchain/events", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("blockchain/events") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Err(Error::InvalidStateReference); + }; + if let Some(err) = state.errors.check_block_hash("blockchain/events", &block_hash) { + return Err(Error::Internal(err.to_owned())); + } + let events = state.events_by_hash.get(&block_hash).cloned().unwrap_or_else(|| vec![vec![]]); + Ok( + events + .into_iter() + .map(|events_per_tx: Vec| { + events_per_tx + .into_iter() + .map(|event| hex::encode(borsh::to_vec(&event).unwrap())) + .collect::>() + }) + .collect::>>(), + ) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("validator-sets/current_session", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("validator-sets/current_session") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Err(Error::InvalidStateReference); + }; + let network = parse_network(¶ms)?; + let vs = state.validator_sets_for_block(&block_hash); + Ok(vs.sessions.get(&network).map(|s| s.0)) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("validator-sets/current_stake", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("validator-sets/current_stake") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Err(Error::InvalidStateReference); + }; + let network = parse_network(¶ms)?; + let vs = state.validator_sets_for_block(&block_hash); + Ok(vs.stakes.get(&network).map(|a| a.0)) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("validator-sets/keys", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("validator-sets/keys") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Err(Error::InvalidStateReference); + }; + let set = parse_set(¶ms)?; + let vs = state.validator_sets_for_block(&block_hash); + Ok(vs.keys.get(&set).map(|kp| hex::encode(borsh::to_vec(kp).unwrap()))) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + module + .register_async_method("validator-sets/current_validators", async |params, state, _ext| { + let state = state.read().await; + if let Some(err) = state.errors.check_method("validator-sets/current_validators") { + return Err(Error::Internal(err.to_owned())); + } + let Some(block_hash) = resolve_block_hash(¶ms, &state)? else { + return Err(Error::InvalidStateReference); + }; + let network = parse_network(¶ms)?; + let vs = state.validator_sets_for_block(&block_hash); + Ok(vs.validators.get(&network).map(|v| v.iter().map(ToString::to_string).collect::>())) + }) + .map_err(|e| Error::Internal(e.to_string()))?; + + Ok(module) +} diff --git a/tests/shim-rpc/src/state.rs b/tests/shim-rpc/src/state.rs new file mode 100644 index 000000000..8c760794a --- /dev/null +++ b/tests/shim-rpc/src/state.rs @@ -0,0 +1,187 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; + +use blake2::{Blake2b256, Digest as _}; +use tokio::sync::RwLock; + +use serai_abi::{ + Block, Event, Header, HeaderV1, BLOCK_BRANCH_TAG, BLOCK_LEAF_TAG, + primitives::{ + BlockHash, + balance::Amount, + crypto::KeyPair, + merkle::{IncrementalUnbalancedMerkleTree, UnbalancedMerkleTree}, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{ExternalValidatorSet, Session}, + address::SeraiAddress, + }, +}; + +/// Per-block validator-sets state. +#[derive(Clone, Debug, Default)] +pub struct ValidatorSetsState { + pub sessions: HashMap, + pub stakes: HashMap, + pub keys: HashMap, + pub validators: HashMap>, + pub pending_slash_reports: HashMap, +} + +/// Injectable failures at three levels. +#[derive(Clone, Debug, Default)] +pub struct ErrorInjection { + /// Any call to the method fails with this message. + pub method_errors: HashMap, + /// Fails for a specific block number. + pub block_number_errors: HashMap<(String, u64), String>, + /// Fails for a specific block hash. + pub block_hash_errors: HashMap<(String, BlockHash), String>, +} + +impl ErrorInjection { + /// Check if an error should be injected for this method call. + pub fn check_method(&self, method: &str) -> Option<&String> { + self.method_errors.get(method) + } + + /// Check if an error should be injected for this method + block number. + pub fn check_block_number(&self, method: &str, number: u64) -> Option<&String> { + self.block_number_errors.get(&(method.to_owned(), number)) + } + + /// Check if an error should be injected for this method + block hash. + pub fn check_block_hash(&self, method: &str, hash: &BlockHash) -> Option<&String> { + self.block_hash_errors.get(&(method.to_owned(), *hash)) + } +} + +/// The shared mutable state backing the shim RPC node. +pub struct ShimState { + pub blocks_by_number: HashMap, + pub block_number_by_hash: HashMap, + pub events_by_hash: HashMap>>, + pub builds_upon: IncrementalUnbalancedMerkleTree, + pub published_transactions: Vec>, + pub default_validator_sets: ValidatorSetsState, + pub validator_sets_by_block: HashMap, + pub errors: ErrorInjection, + /// Block numbers that `blockchain/block` should return `None` for (simulates "not found"). + pub missing_blocks: HashSet, +} + +impl Default for ShimState { + fn default() -> Self { + Self { + blocks_by_number: HashMap::new(), + block_number_by_hash: HashMap::new(), + events_by_hash: HashMap::new(), + builds_upon: IncrementalUnbalancedMerkleTree::new(), + published_transactions: Vec::new(), + default_validator_sets: ValidatorSetsState::default(), + validator_sets_by_block: HashMap::new(), + errors: ErrorInjection::default(), + missing_blocks: HashSet::new(), + } + } +} + +impl ShimState { + /// Construct a block and register it. Mirrors `FakeSerai::make_block` from intend.rs. + pub fn make_block(&mut self, number: u64, events: Vec>) -> BlockHash { + let block = Block { + header: Header::V1(HeaderV1 { + number, + builds_upon: self.builds_upon.clone().calculate(BLOCK_BRANCH_TAG), + proposer: SeraiAddress([0; 32]), + #[expect(clippy::cast_possible_truncation, clippy::as_conversions)] + unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() + as u64, + transactions_commitment: UnbalancedMerkleTree::EMPTY, + events_commitment: UnbalancedMerkleTree::EMPTY, + consensus_commitment: [0; 32], + }), + transactions: vec![], + }; + + let block_hash = block.header.hash(); + + self.builds_upon.append( + BLOCK_BRANCH_TAG, + Blake2b256::new_with_prefix([BLOCK_LEAF_TAG]).chain_update(block_hash.0).finalize().into(), + ); + + self.block_number_by_hash.insert(block_hash, number); + self.blocks_by_number.insert(number, block); + self.events_by_hash.insert(block_hash, events); + + block_hash + } + + /// The latest finalized block number, or 0 if no blocks exist. + pub fn latest_finalized_block_number(&self) -> u64 { + self.blocks_by_number.keys().copied().max().unwrap_or(0) + } + + /// Create a block whose `builds_upon` header value comes from an empty tree, + /// making it invalid with respect to the actual chain. + /// + /// Unlike [`Self::make_block`], this does **not** advance the internal + /// `builds_upon` state, so subsequent calls to `make_block` remain valid. + pub fn make_non_linear_block( + &mut self, + number: u64, + events: Vec>, + ) -> BlockHash { + let block = Block { + header: Header::V1(HeaderV1 { + number, + // Use an empty tree — this will NOT match what the task expects + builds_upon: IncrementalUnbalancedMerkleTree::new().calculate(BLOCK_BRANCH_TAG), + proposer: SeraiAddress([0; 32]), + #[expect(clippy::cast_possible_truncation, clippy::as_conversions)] + unix_time_in_millis: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64, + transactions_commitment: UnbalancedMerkleTree::EMPTY, + events_commitment: UnbalancedMerkleTree::EMPTY, + consensus_commitment: [0; 32], + }), + transactions: vec![], + }; + + let block_hash = block.header.hash(); + + // Register the block but do NOT update builds_upon + self.block_number_by_hash.insert(block_hash, number); + self.blocks_by_number.insert(number, block); + self.events_by_hash.insert(block_hash, events); + + block_hash + } + + /// Remove a block from all maps. + pub fn remove_block(&mut self, number: u64) { + if let Some(block) = self.blocks_by_number.remove(&number) { + let hash = block.header.hash(); + self.block_number_by_hash.remove(&hash); + self.events_by_hash.remove(&hash); + } + } + + /// Look up a block hash by block number. + pub fn block_hash_by_number(&self, number: u64) -> Option { + self.blocks_by_number.get(&number).map(|block| block.header.hash()) + } + + /// Get the validator-sets state for a specific block, falling back to the default. + pub fn validator_sets_for_block(&self, hash: &BlockHash) -> &ValidatorSetsState { + self.validator_sets_by_block.get(hash).unwrap_or(&self.default_validator_sets) + } +} + +/// Thread-safe shared state handle. +pub type SharedState = Arc>; diff --git a/tests/shim-rpc/tests/integration.rs b/tests/shim-rpc/tests/integration.rs new file mode 100644 index 000000000..ce34888b1 --- /dev/null +++ b/tests/shim-rpc/tests/integration.rs @@ -0,0 +1,275 @@ +use serai_shim_rpc::{SeraiShimRpc, SeraiShimRpcBuilder}; + +use serai_client_serai::{ + Serai, + abi::{ + Event, + primitives::{ + address::SeraiAddress, + balance::Amount, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{KeyShares, Session, ValidatorSet}, + }, + validator_sets as vs_mod, + }, +}; + +fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { + Event::ValidatorSets(vs_mod::Event::Allocation { validator, network, amount: Amount(amount) }) +} + +fn set_decided_event(network: NetworkId, session: u32, validator: SeraiAddress) -> Event { + Event::ValidatorSets(vs_mod::Event::SetDecided { + set: ValidatorSet { network, session: Session(session) }, + validators: vec![(validator, KeyShares::ONE)], + }) +} + +#[tokio::test] +async fn test_basic_block_and_number() { + let sim = SeraiShimRpcBuilder::new() + .with_block(vec![vec![]]) + .with_block(vec![vec![]]) + .with_block(vec![vec![]]) + .build() + .await; + + let client = Serai::new(sim.url()).unwrap(); + + // Latest finalized block number should be 3 + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 3); + + // Block by number should return a valid block + let block = client.block_by_number(1).await.unwrap(); + assert!(block.is_some()); + let block = block.unwrap(); + assert_eq!(block.header.number(), 1); + + // Block 2 has number 2 + let block2 = client.block_by_number(2).await.unwrap().unwrap(); + assert_eq!(block2.header.number(), 2); + + // Block 3 has number 3 + let block3 = client.block_by_number(3).await.unwrap().unwrap(); + assert_eq!(block3.header.number(), 3); + + // Non-existent block returns None + let none = client.block_by_number(999).await.unwrap(); + assert!(none.is_none()); + + sim.stop(); +} + +#[tokio::test] +async fn test_block_by_hash() { + let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + + let client = Serai::new(sim.url()).unwrap(); + + // Get block by number, then look it up by hash + let block = client.block_by_number(1).await.unwrap().unwrap(); + let hash = block.header.hash(); + + let block_by_hash = client.block(hash).await.unwrap(); + assert!(block_by_hash.is_some()); + assert_eq!(block_by_hash.unwrap().header.number(), 1); + + // is_finalized should return true + let finalized = client.finalized(hash).await.unwrap(); + assert!(finalized); + + sim.stop(); +} + +#[tokio::test] +async fn test_events_round_trip() { + let validator = SeraiAddress([1u8; 32]); + let events = vec![vec![ + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 1_000_000), + set_decided_event(NetworkId::External(ExternalNetworkId::Bitcoin), 0, validator), + ]]; + + let sim = SeraiShimRpcBuilder::new().with_block(events).build().await; + + let client = Serai::new(sim.url()).unwrap(); + + let block = client.block_by_number(1).await.unwrap().unwrap(); + let hash = block.header.hash(); + + let events = client.events(hash).await.unwrap(); + + // Extract validator_sets events + let vs = events.validator_sets(); + let vs_events: Vec<_> = vs.events().collect(); + assert_eq!(vs_events.len(), 2); + + // Verify first event is an Allocation + assert!(matches!(vs_events[0], vs_mod::Event::Allocation { .. })); + // Verify second event is a SetDecided + assert!(matches!(vs_events[1], vs_mod::Event::SetDecided { .. })); + + sim.stop(); +} + +#[tokio::test] +async fn test_dynamic_block_addition() { + let sim = SeraiShimRpc::builder().build().await; + + let client = Serai::new(sim.url()).unwrap(); + + // Initially no blocks + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 0); + + // Add a block dynamically + let hash = sim.add_block_with_events(vec![vec![]]).await; + + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1); + + // Look up the block by its hash + let block = client.block(hash).await.unwrap(); + assert!(block.is_some()); + assert_eq!(block.unwrap().header.number(), 1); + + // Add another + sim.add_block_with_events(vec![vec![]]).await; + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 2); + + sim.stop(); +} + +#[tokio::test] +async fn test_error_injection() { + let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + + let client = Serai::new(sim.url()).unwrap(); + + // Works normally + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1); + + // Inject error + sim.set_error("blockchain/latest_finalized_block_number", "simulated failure").await; + + // Now it should fail + let result = client.latest_finalized_block_number().await; + assert!(result.is_err()); + let err_msg = format!("{}", result.unwrap_err()); + assert!(err_msg.contains("simulated failure"), "error was: {err_msg}"); + + // Clear error + sim.clear_error("blockchain/latest_finalized_block_number").await; + + // Should work again + let latest = client.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1); + + sim.stop(); +} + +#[tokio::test] +async fn test_clear_all_errors() { + let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + + let client = Serai::new(sim.url()).unwrap(); + + // Inject multiple errors + sim.set_error("blockchain/latest_finalized_block_number", "err1").await; + sim.set_error("blockchain/block", "err2").await; + + // Both should fail + client.latest_finalized_block_number().await.unwrap_err(); + client.block_by_number(1).await.unwrap_err(); + + // Clear all + sim.clear_all_errors().await; + + // Both should work + assert_eq!(client.latest_finalized_block_number().await.unwrap(), 1); + assert!(client.block_by_number(1).await.unwrap().is_some()); + + sim.stop(); +} + +#[tokio::test] +async fn test_builds_upon_chain() { + // Verify that blocks form a proper chain via builds_upon + let sim = SeraiShimRpcBuilder::new() + .with_block(vec![vec![]]) + .with_block(vec![vec![]]) + .with_block(vec![vec![]]) + .build() + .await; + + let client = Serai::new(sim.url()).unwrap(); + + let block1 = client.block_by_number(1).await.unwrap().unwrap(); + let block2 = client.block_by_number(2).await.unwrap().unwrap(); + let block3 = client.block_by_number(3).await.unwrap().unwrap(); + + // Each block should have a distinct builds_upon + assert_ne!(block1.header.builds_upon(), block2.header.builds_upon()); + assert_ne!(block2.header.builds_upon(), block3.header.builds_upon()); + + // Each block should have a distinct hash + assert_ne!(block1.header.hash(), block2.header.hash()); + assert_ne!(block2.header.hash(), block3.header.hash()); + + sim.stop(); +} + +#[tokio::test] +async fn test_publish_transaction() { + let sim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; + + // The simulator stores raw transaction bytes without execution. + { + let state = sim.state().read().await; + assert!(state.published_transactions.is_empty()); + } + + // The publish_transaction method on the client requires a real Transaction, + // so we verify the endpoint works by pushing directly to state. + { + let mut state = sim.state().write().await; + state.published_transactions.push(vec![0xDE, 0xAD]); + } + { + let state = sim.state().read().await; + assert_eq!(state.published_transactions.len(), 1); + assert_eq!(state.published_transactions[0], vec![0xDE, 0xAD]); + } + + sim.stop(); +} + +#[tokio::test] +async fn test_validator_sets_state() { + let sim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; + + let client = Serai::new(sim.url()).unwrap(); + + // Set up validator-sets state on the default + { + let mut state = sim.state().write().await; + let network = NetworkId::External(ExternalNetworkId::Bitcoin); + state.default_validator_sets.sessions.insert(network, Session(5)); + state.default_validator_sets.stakes.insert(network, Amount(1_000_000)); + } + + // Query via the real client's State API + let serai_state = client.state().await.unwrap(); + + let session = + serai_state.current_session(NetworkId::External(ExternalNetworkId::Bitcoin)).await.unwrap(); + assert_eq!(session, Some(Session(5))); + + let stake = + serai_state.current_stake(NetworkId::External(ExternalNetworkId::Bitcoin)).await.unwrap(); + assert_eq!(stake, Some(Amount(1_000_000))); + + sim.stop(); +} diff --git a/tests/task/Cargo.toml b/tests/task/Cargo.toml new file mode 100644 index 000000000..0ce905cf5 --- /dev/null +++ b/tests/task/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "serai-test-task" +version = "0.1.0" +description = "Common test utilities for serai-task ContinuallyRan tasks" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/tests/task" +authors = ["Luke Parker ", "rafael_xmr "] +edition = "2021" +rust-version = "1.85" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +serai-task = { path = "../../common/task" } +serai-log = { path = "../../common/log", version = "0.1.0" } diff --git a/tests/task/LICENSE b/tests/task/LICENSE new file mode 100644 index 000000000..f995f1e78 --- /dev/null +++ b/tests/task/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Serai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tests/task/src/lib.rs b/tests/task/src/lib.rs new file mode 100644 index 000000000..38b19f896 --- /dev/null +++ b/tests/task/src/lib.rs @@ -0,0 +1,35 @@ +//! Common test utilities for `serai-task` [`ContinuallyRan`] tasks. + +#![deny(missing_docs)] + +use serai_task::ContinuallyRan; + +/// Test helpers for asserting task iteration behavior. +pub struct TaskTest; + +impl TaskTest { + /// Assert that a task iteration succeeds and returns the expected progress value. + pub async fn task_runs_once_and_matches_progress( + task: &mut T, + made_progress: bool, + ) { + serai_log::log::debug!("running task once: {}", core::any::type_name::()); + assert_eq!(task.run_iteration().await.unwrap(), made_progress); + } + + /// Assert that a task iteration fails with an error containing the given string. + pub async fn task_runs_and_fails_with(task: &mut T, error: &str) { + serai_log::log::debug!("running task (expecting failure): {}", core::any::type_name::()); + let err = task.run_iteration().await.unwrap_err(); + let err_str = format!("{err:?}"); + assert!(err_str.contains(error), "{err_str}"); + } +} + +/// Trait for test structs that can produce a [`ContinuallyRan`] task. +pub trait IntoTask { + /// The task type produced by this test struct. + type Task: ContinuallyRan + 'static; + /// Create the task from the current test state. + fn into_task(&self) -> Self::Task; +} From b5774510ff87d0bbe3468426a98ea1f70eceeee3 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Thu, 26 Feb 2026 17:04:25 -0300 Subject: [PATCH 26/71] feat(tributary): begin tests based off cosign --- coordinator/tributary/Cargo.toml | 2 +- coordinator/tributary/src/db.rs | 2 +- .../tributary/src/tests/transaction.rs | 42 +++++++++ coordinator/tributary/src/transaction.rs | 85 +++++++++---------- 4 files changed, 86 insertions(+), 45 deletions(-) create mode 100644 coordinator/tributary/src/tests/transaction.rs diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index bcff84ad4..333f39aeb 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -41,7 +41,7 @@ serai-coordinator-substrate = { path = "../substrate" } messages = { package = "serai-processor-messages", path = "../../processor/messages" } -log = { version = "0.4", default-features = false, features = ["std"] } +serai-log = { path = "../../common/log", version = "0.1.0" } [features] longer-reattempts = [] diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index ed4a27db4..10b4752ba 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -22,7 +22,7 @@ pub enum Topic { }, // DkgParticipation isn't represented here as participations are immediately sent to the - // processor, not accumulated within this databse + // processor, not accumulated within this database /// Participation in the signing protocol to confirm the DKG results on Substrate DkgConfirmation { /// The attempt number this is for diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs new file mode 100644 index 000000000..13eadb159 --- /dev/null +++ b/coordinator/tributary/src/tests/transaction.rs @@ -0,0 +1,42 @@ +use rand::{CryptoRng, RngCore}; +use schnorr::SchnorrSignature; + +use ciphersuite::{group::Group as _, *}; +use dalek_ff_group::Ristretto; + +use crate::transaction::{SigningProtocolRound, Signed}; + +fn random_signed(rng: &mut R) -> Signed { + let signed = tributary_sdk::tests::random_signed(&mut *rng); + Signed { signer: signed.signer, signature: signed.signature } +} + +#[test] +fn signing_protocol_round_nonce() { + assert_eq!(SigningProtocolRound::Preprocess.nonce(), 0); + assert_eq!(SigningProtocolRound::Share.nonce(), 1); +} + +#[test] +fn default_signer_has_identity() { + let default_signed = Signed::default(); + let identity = ::G::identity(); + assert_eq!(default_signed.signer(), identity); + assert_eq!( + default_signed.signature, + SchnorrSignature { R: identity, s: ::F::ZERO } + ); +} + +#[test] +fn serialize_signed() { + let default_signed = Signed::default(); + let encoded = borsh::to_vec(&default_signed).unwrap(); + let decoded: Signed = borsh::from_slice(&encoded).unwrap(); + assert_eq!(decoded, default_signed); + + let signed = random_signed(&mut rand::rngs::OsRng); + let encoded = borsh::to_vec(&signed).unwrap(); + let decoded: Signed = borsh::from_slice(&encoded).unwrap(); + assert_eq!(decoded, signed); +} diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 950d352ac..0092d7fd9 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -37,7 +37,7 @@ pub enum SigningProtocolRound { } impl SigningProtocolRound { - fn nonce(self) -> u32 { + pub(crate) fn nonce(self) -> u32 { match self { SigningProtocolRound::Preprocess => 0, SigningProtocolRound::Share => 1, @@ -51,9 +51,9 @@ impl SigningProtocolRound { #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Signed { /// The signer. - signer: ::G, + pub(crate) signer: ::G, /// The signature. - signature: SchnorrSignature, + pub(crate) signature: SchnorrSignature, } impl BorshSerialize for Signed { @@ -77,8 +77,8 @@ impl Signed { } /// Provide a nonce to convert a `Signed` into a `tributary::Signed`. - fn to_tributary_signed(self, nonce: u32) -> TributarySigned { - TributarySigned { signer: self.signer, nonce, signature: self.signature } + fn to_tributary_signed(self, round: SigningProtocolRound) -> TributarySigned { + TributarySigned { signer: self.signer, nonce: round.nonce(), signature: self.signature } } } @@ -140,39 +140,38 @@ pub enum Transaction { substrate_block_hash: BlockHash, }, + // After producing this cosign, we need to start work on the latest intended-to-be cosigned + // block. That requires agreement on when this cosign was produced, which we solve by noting + // this cosign on-chain. + // + // We ideally don't have this transaction at all. The coordinator, without access to any of the + // key shares, could observe the FROST signing session and determine a successful completion. + // Unfortunately, that functionality is not present in modular-frost, so we do need to support + // *some* asynchronous flow (where the processor or P2P network informs us of the successful + // completion). + // + // If we use a `Provided` transaction, that requires everyone observe this cosign. + // + // If we use an `Unsigned` transaction, we can't verify the cosign signature inside + // `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since + // a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`, + // we can't verify the signature against the group's public key unless we also include that (but + // then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary + // blobs on chain). + // + // If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally + // slash. We have horrible performance though as for 100 validators, all 100 will publish this + // transaction. + // + // We could use a signed `Unsigned` transaction, where it includes a signer and signature but + // isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on + // its contents. + // + // The optimal choice is likely to use a `Provided` transaction. We don't actually need to + // observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in + // question no longer needs to produced, which would mean the cosigning protocol at-large + // cosigning the block in question, it'd be safe to provide this and move on to the next cosign. /// Note an intended-to-be-cosigned Substrate block as cosigned - /// - /// After producing this cosign, we need to start work on the latest intended-to-be cosigned - /// block. That requires agreement on when this cosign was produced, which we solve by noting - /// this cosign on-chain. - /// - /// We ideally don't have this transaction at all. The coordinator, without access to any of the - /// key shares, could observe the FROST signing session and determine a successful completion. - /// Unfortunately, that functionality is not present in modular-frost, so we do need to support - /// *some* asynchronous flow (where the processor or P2P network informs us of the successful - /// completion). - /// - /// If we use a `Provided` transaction, that requires everyone observe this cosign. - /// - /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside - /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since - /// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`, - /// we can't verify the signature against the group's public key unless we also include that (but - /// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary - /// blobs on chain). - /// - /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally - /// slash. We have horrible performance though as for 100 validators, all 100 will publish this - /// transaction. - /// - /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but - /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on - /// its contents. - /// - /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to - /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in - /// question no longer needs to produced, which would mean the cosigning protocol at-large - /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign. Cosigned { /// The hash of the Substrate block which was cosigned substrate_block_hash: BlockHash, @@ -242,20 +241,20 @@ impl TransactionTrait for Transaction { match self { Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed( borsh::to_vec(&(b"RemoveParticipant".as_slice(), participant)).unwrap(), - signed.to_tributary_signed(0), + signed.to_tributary_signed(SigningProtocolRound::Preprocess), ), Transaction::DkgParticipation { signed, .. } => TransactionKind::Signed( borsh::to_vec(b"DkgParticipation".as_slice()).unwrap(), - signed.to_tributary_signed(0), + signed.to_tributary_signed(SigningProtocolRound::Preprocess), ), Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(), - signed.to_tributary_signed(0), + signed.to_tributary_signed(SigningProtocolRound::Share), ), Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(), - signed.to_tributary_signed(1), + signed.to_tributary_signed(SigningProtocolRound::Share), ), Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"), @@ -265,12 +264,12 @@ impl TransactionTrait for Transaction { Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(), - signed.to_tributary_signed(round.nonce()), + signed.to_tributary_signed(round), ), Transaction::SlashReport { signed, .. } => TransactionKind::Signed( borsh::to_vec(b"SlashReport".as_slice()).unwrap(), - signed.to_tributary_signed(0), + signed.to_tributary_signed(SigningProtocolRound::Preprocess), ), } } From c0ece419ff0756bcba0762cd5a2d2afaaacca340 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 27 Feb 2026 16:43:42 -0300 Subject: [PATCH 27/71] feat(tributary): finish transaction.rs tests and add db.rs tests --- coordinator/tributary/src/lib.rs | 3 + coordinator/tributary/src/tests/db.rs | 309 +++++++++++ .../tributary/src/tests/transaction.rs | 495 +++++++++++++++++- coordinator/tributary/src/transaction.rs | 4 +- 4 files changed, 790 insertions(+), 21 deletions(-) create mode 100644 coordinator/tributary/src/tests/db.rs diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index b1ab27e41..5f63bb08a 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -39,6 +39,9 @@ mod db; use db::*; pub use db::Topic; +#[cfg(test)] +mod tests; + /// Messages to send to the Processors. pub struct ProcessorMessages; impl ProcessorMessages { diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs new file mode 100644 index 000000000..950bd972b --- /dev/null +++ b/coordinator/tributary/src/tests/db.rs @@ -0,0 +1,309 @@ +use rand::{RngCore, rngs::OsRng}; + +use serai_primitives::{ + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, +}; + +use messages::sign::VariantSignId; + +use serai_db::{Db, DbTxn, MemDb}; +use serai_substrate_tests::random_serai_address; + +use crate::{db::*, transaction::SigningProtocolRound}; + +fn default_set() -> ExternalValidatorSet { + // The external validator set does not change any functionality that is being tested + // use this as default + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } +} + +fn all_topics() -> Vec { + vec![ + Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }, + Topic::SlashReport, + Topic::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }, + Topic::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Share, + }, + ] +} + +mod topic { + use messages::sign::SignId; + use super::*; + + #[test] + fn next_attempt_topic() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), + Topic::DkgConfirmation { attempt, round: _ } => { + if let Some(next_attempt) = attempt.checked_add(1) { + assert_eq!( + topic.next_attempt_topic(), + Some(Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }) + ); + } else { + assert_eq!(topic.next_attempt_topic(), None); + } + } + Topic::SlashReport => assert_eq!(topic.next_attempt_topic(), None), + Topic::Sign { id, attempt, round: _ } => { + if let Some(next_attempt) = attempt.checked_add(1) { + assert_eq!( + topic.next_attempt_topic(), + Some(Topic::Sign { + id, + attempt: next_attempt, + round: SigningProtocolRound::Preprocess + }) + ); + } else { + assert_eq!(topic.next_attempt_topic(), None); + } + } + } + } + } + + #[test] + fn reattempt_topic() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => { + if let Some(next_attempt) = attempt.checked_add(1) { + assert_eq!( + topic.reattempt_topic(), + Some(( + next_attempt, + Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }, + )) + ); + } else { + assert_eq!(topic.reattempt_topic(), None); + } + } + SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), + }, + Topic::SlashReport => assert_eq!(topic.reattempt_topic(), None), + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => { + if let Some(next_attempt) = attempt.checked_add(1) { + assert_eq!( + topic.reattempt_topic(), + Some(( + next_attempt, + Topic::Sign { + id, + attempt: next_attempt, + round: SigningProtocolRound::Preprocess + }, + )) + ); + } else { + assert_eq!(topic.reattempt_topic(), None); + } + } + SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), + }, + } + } + } + + #[test] + fn sign_id() { + let set = default_set(); + for topic in all_topics() { + match topic { + Topic::Sign { id, attempt, round: _ } => { + assert_eq!(topic.sign_id(set), Some(SignId { session: set.session, id, attempt })) + } + _ => assert_eq!(topic.sign_id(set), None), + } + } + } + + #[test] + fn dkg_confirmation_sign_id() { + let set = default_set(); + for topic in all_topics() { + match topic { + Topic::DkgConfirmation { attempt, round: _ } => assert_eq!( + topic.dkg_confirmation_sign_id(set), + Some({ + let id = { + let mut id = [0; 32]; + let encoded_set = borsh::to_vec(&set).unwrap(); + id[.. encoded_set.len()].copy_from_slice(&encoded_set); + VariantSignId::Batch(id) + }; + SignId { session: set.session, id, attempt } + }) + ), + _ => assert_eq!(topic.dkg_confirmation_sign_id(set), None), + } + } + } + + #[test] + fn succeeding_topic() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.succeeding_topic(), None), + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => assert_eq!( + topic.succeeding_topic(), + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) + ), + + SigningProtocolRound::Share => assert_eq!(topic.succeeding_topic(), None), + }, + Topic::SlashReport => assert_eq!(topic.succeeding_topic(), None), + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => assert_eq!( + topic.succeeding_topic(), + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) + ), + SigningProtocolRound::Share => assert_eq!(topic.succeeding_topic(), None), + }, + } + } + } + + #[test] + fn requires_recognition() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.requires_recognition(), false), + Topic::DkgConfirmation { attempt, .. } => { + assert_eq!(topic.requires_recognition(), attempt != 0) + } + Topic::SlashReport => assert_eq!(topic.requires_recognition(), false), + Topic::Sign { .. } => assert_eq!(topic.requires_recognition(), true), + } + } + } + + #[test] + fn participating() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => { + assert_eq!(topic.participating(), Participating::Everyone) + } + Topic::DkgConfirmation { .. } => { + assert_eq!(topic.participating(), Participating::Participated) + } + Topic::SlashReport => assert_eq!(topic.participating(), Participating::Everyone), + Topic::Sign { .. } => assert_eq!(topic.participating(), Participating::Participated), + } + } + } +} + +mod tributary_db { + use serai_substrate_tests::random_block_hash; + + use super::*; + + #[test] + fn start_cosigning() { + let mut db = MemDb::new(); + let set = default_set(); + let block_hash1 = random_block_hash(&mut OsRng); + let block_number1 = OsRng.next_u64(); + + let topic = Topic::Sign { + id: VariantSignId::Cosign(block_number1), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + + // Recognizes topic + { + let mut txn = db.txn(); + TributaryDb::start_cosigning(&mut txn, set, block_hash1, block_number1); + + assert!(TributaryDb::recognized(&txn, set, topic,)); + txn.commit(); + } + + // Same set cannot recognize again until finished + { + let mut txn = db.txn(); + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash1)); + + let retry = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let block_hash2 = random_block_hash(&mut OsRng); + let block_number2 = OsRng.next_u64(); + TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); + })); + + assert!(retry.is_err()); + txn.commit(); + } + + // Finish cosigning + { + let mut txn = db.txn(); + TributaryDb::finish_cosigning(&mut txn, set); + assert_eq!(ActivelyCosigning::get(&mut txn, set), None); + + // Previous topic remains recognized + assert!(TributaryDb::recognized( + &txn, + set, + Topic::Sign { + id: VariantSignId::Cosign(block_number1), + attempt: 0, + round: SigningProtocolRound::Preprocess, + } + )); + + txn.commit(); + } + + // Start new cosigning + { + let mut txn = db.txn(); + let block_hash2 = random_block_hash(&mut OsRng); + let block_number2 = OsRng.next_u64(); + + TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash2)); + + TributaryDb::finish_cosigning(&mut txn, set); + assert_eq!(ActivelyCosigning::get(&mut txn, set), None); + + // New topic recognized + assert!(TributaryDb::recognized( + &txn, + set, + Topic::Sign { + id: VariantSignId::Cosign(block_number2), + attempt: 0, + round: SigningProtocolRound::Preprocess, + } + )); + + txn.commit(); + } + } +} diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 13eadb159..245919c1e 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -1,42 +1,499 @@ -use rand::{CryptoRng, RngCore}; +use core::ops::Deref as _; + +use rand::{CryptoRng, RngCore, rngs::OsRng}; use schnorr::SchnorrSignature; +use zeroize::Zeroizing; use ciphersuite::{group::Group as _, *}; use dalek_ff_group::Ristretto; -use crate::transaction::{SigningProtocolRound, Signed}; +use serai_primitives::{validator_sets::KeyShares, address::SeraiAddress}; +use serai_substrate_tests::{random_serai_address, random_block_hash}; + +use messages::sign::VariantSignId; + +use tributary_sdk::{ + ReadWrite, + tests::new_genesis, + transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, +}; + +use crate::db::Topic; +use crate::transaction::{SigningProtocolRound, Signed, Transaction}; + +fn random_key(rng: &mut R) -> Zeroizing<::F> { + Zeroizing::new(::F::random(&mut *rng)) +} fn random_signed(rng: &mut R) -> Signed { let signed = tributary_sdk::tests::random_signed(&mut *rng); Signed { signer: signed.signer, signature: signed.signature } } +/// One of each signed transaction kind with default signatures. +fn all_signed_transactions() -> Vec { + vec![ + Transaction::RemoveParticipant { + participant: random_serai_address(&mut OsRng), + signed: random_signed(&mut OsRng), + }, + Transaction::DkgParticipation { + participation: vec![1, 2, 3], + signed: random_signed(&mut OsRng), + }, + Transaction::DkgConfirmationPreprocess { + attempt: 0, + preprocess: [1; 64], + signed: random_signed(&mut OsRng), + }, + Transaction::DkgConfirmationShare { + attempt: 0, + share: [1; 32], + signed: random_signed(&mut OsRng), + }, + Transaction::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![1, 2, 3]], + signed: random_signed(&mut OsRng), + }, + Transaction::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Share, + data: vec![vec![1, 2, 3]], + signed: random_signed(&mut OsRng), + }, + Transaction::SlashReport { slash_points: vec![0, 1, 2], signed: random_signed(&mut OsRng) }, + ] +} + +/// One of each provided transaction kind. +fn all_provided_transactions() -> Vec { + vec![ + Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }, + Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }, + Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }, + Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }, + ] +} + +/// One of each of all transaction kinds. +fn all_transactions() -> Vec { + let mut txs = all_signed_transactions(); + txs.extend(all_provided_transactions()); + txs +} + +fn all_signing_protocol_rounds() -> Vec { + vec![SigningProtocolRound::Preprocess, SigningProtocolRound::Share] +} + #[test] fn signing_protocol_round_nonce() { assert_eq!(SigningProtocolRound::Preprocess.nonce(), 0); assert_eq!(SigningProtocolRound::Share.nonce(), 1); } +mod signed { + use super::*; + + #[test] + fn default_signer_is_identity() { + let default_signed = Signed::default(); + let identity = ::G::identity(); + assert_eq!(default_signed.signer(), identity); + assert_eq!( + default_signed.signature, + SchnorrSignature { R: identity, s: ::F::ZERO } + ); + } + + #[test] + fn to_tributary_signed_matches_signed() { + let signed = random_signed(&mut OsRng); + for round in all_signing_protocol_rounds() { + let tributary_signed = signed.clone().to_tributary_signed(round); + assert_eq!(signed.signer(), tributary_signed.signer); + assert_eq!(signed.signature, tributary_signed.signature); + assert_eq!(tributary_signed.nonce, round.nonce()); + } + } + + #[test] + fn signed_borsh_serialize_and_deserialize() { + use std::io::{self, Read, Write}; + use borsh::{BorshSerialize, BorshDeserialize}; + + // Should work + { + let signed = random_signed(&mut OsRng); + + let serialized = borsh::to_vec(&signed).unwrap(); + let mut manual_buf = Vec::new(); + signed.serialize(&mut manual_buf).unwrap(); + assert_eq!(serialized, manual_buf); + + let deserialized: Signed = borsh::from_slice(&serialized).unwrap(); + let mut cursor = std::io::Cursor::new(&serialized); + assert_eq!(deserialized, Signed::deserialize_reader(&mut cursor).unwrap()); + + assert_eq!(signed, deserialized); + } + + // Writer failure returns error + { + struct FailingWriter; + impl Write for FailingWriter { + fn write(&mut self, _buf: &[u8]) -> io::Result { + Err(io::Error::new(io::ErrorKind::Other, "simulated write failure")) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + let mut writer = FailingWriter; + + let result = random_signed(&mut OsRng).serialize(&mut writer); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().kind(), io::ErrorKind::Other); + } + + // Reader failure returns error + { + struct FailingReader; + impl Read for FailingReader { + fn read(&mut self, _buf: &mut [u8]) -> io::Result { + Err(io::Error::new(io::ErrorKind::UnexpectedEof, "simulated read failure")) + } + } + + let mut failing_reader = FailingReader; + let result = Signed::deserialize_reader(&mut failing_reader); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); + } + + // Errors with incomplete data + { + let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); + let truncated = &serialized[.. 5]; + let mut cursor = std::io::Cursor::new(truncated); + let result = Signed::deserialize_reader(&mut cursor); + assert!(result.is_err()); + } + } +} + #[test] -fn default_signer_has_identity() { - let default_signed = Signed::default(); - let identity = ::G::identity(); - assert_eq!(default_signed.signer(), identity); +fn readwrite_transaction() { + let key = random_key(&mut OsRng); + let genesis = new_genesis(); + + for mut tx in all_transactions() { + let serialized = ReadWrite::serialize(&tx); + let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); + assert_eq!(tx, deserialized, "ReadWrite failed for {tx:?}"); + + match tx.kind() { + TransactionKind::Signed(_, _) => { + tx.sign(&mut OsRng, genesis, &key); + let serialized = ReadWrite::serialize(&tx); + let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); + assert_eq!(tx, deserialized, "ReadWrite failed after signing for {tx:?}"); + } + _ => {} + } + } +} + +mod kind { + use super::*; + + #[test] + fn signed_transactions_matches_kind_and_nonce_and_sig() { + let key = random_key(&mut OsRng); + let genesis = new_genesis(); + + for mut tx in all_signed_transactions() { + tx.sign(&mut OsRng, genesis, &key); + let sig_hash = tx.sig_hash(genesis); + + match tx.kind() { + TransactionKind::Signed(_, signed) => { + assert!( + signed.signature.verify(signed.signer, sig_hash), + "Signature verification failed for {tx:?}" + ); + + let nonce = signed.nonce; + match tx { + Transaction::RemoveParticipant { participant: _, signed: _ } => { + assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) + } + Transaction::DkgParticipation { participation: _, signed: _ } => { + assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) + } + Transaction::DkgConfirmationPreprocess { attempt: _, preprocess: _, signed: _ } => { + assert_eq!(nonce, SigningProtocolRound::Share.nonce()) + } + Transaction::DkgConfirmationShare { attempt: _, share: _, signed: _ } => { + assert_eq!(nonce, SigningProtocolRound::Share.nonce()) + } + Transaction::Sign { id: _, attempt: _, round, data: _, signed: _ } => { + assert_eq!(nonce, round.nonce()) + } + Transaction::SlashReport { slash_points: _, signed: _ } => { + assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) + } + _ => panic!("Expected Signed kind for {tx:?}"), + } + } + _ => panic!("Expected Signed kind for {tx:?}"), + } + } + } + + #[test] + fn provided_transactions_kind() { + let expected: Vec<(&str, Transaction)> = vec![ + ("Cosign", Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }), + ("Cosigned", Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }), + ("SubstrateBlock", Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }), + ("Batch", Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }), + ]; + + for (order, tx) in expected { + match tx.kind() { + TransactionKind::Provided(actual_order) => { + assert_eq!(actual_order, order, "Wrong order for {tx:?}"); + } + other => panic!("Expected Provided kind, got {other:?} for {tx:?}"), + } + } + } +} + +mod hash { + use super::*; + + #[test] + fn hash_is_deterministic() { + let key = random_key(&mut OsRng); + let genesis = new_genesis(); + + for tx_template in all_signed_transactions() { + assert_eq!( + tx_template.hash(), + tx_template.hash(), + "Hash not deterministic for {tx_template:?}" + ); + + let mut tx1 = tx_template.clone(); + let mut tx2 = tx_template; + + tx1.sign(&mut OsRng, genesis, &key); + tx2.sign(&mut OsRng, genesis, &key); + + // Signing produces different random nonces and different signatures, but the hash strips the signature + assert_eq!(tx1.hash(), tx2.hash(), "Hashes should be equal despite different signatures"); + } + } + + #[test] + fn hash_differs_for_distinct_transactions() { + let txs = all_transactions(); + for i in 0 .. txs.len() { + for j in (i + 1) .. txs.len() { + assert_ne!( + txs[i].hash(), + txs[j].hash(), + "Distinct TXs should have different hashes: {:?} vs {:?}", + txs[i], + txs[j] + ); + } + } + } +} + +#[test] +fn tx_verify() { + // All default transactions are valid + { + for tx in all_transactions() { + assert_eq!(tx.verify(), Ok(()), "verify() rejected valid tx: {tx:?}"); + } + } + + { + // Transaction::Sign with data == KeyShares::MAX_PER_SET passes + assert_eq!( + Transaction::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![]; usize::from(KeyShares::MAX_PER_SET)], + signed: Signed::default(), + } + .verify(), + Ok(()) + ); + // Transaction::Sign with data > KeyShares::MAX_PER_SET fails + assert_eq!( + Transaction::Sign { + id: VariantSignId::Transaction([0; 32]), + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![]; usize::from(KeyShares::MAX_PER_SET) + 1], + signed: Signed::default(), + } + .verify(), + Err(TransactionError::InvalidContent) + ); + } + + { + // Transaction::SlashReport with slash_points == KeyShares::MAX_PER_SET passes + let slash_at = Transaction::SlashReport { + slash_points: vec![0; usize::from(KeyShares::MAX_PER_SET)], + signed: Signed::default(), + }; + assert_eq!(slash_at.verify(), Ok(())); + + // Transaction::SlashReport with slash_points == KeyShares::MAX_PER_SET fails + let slash_over = Transaction::SlashReport { + slash_points: vec![0; usize::from(KeyShares::MAX_PER_SET) + 1], + signed: Signed::default(), + }; + assert_eq!(slash_over.verify(), Err(TransactionError::InvalidContent)); + } +} + +#[test] +fn topic_returns_correct_mapping() { + let participant = SeraiAddress([1; 32]); + + // RemoveParticipant → Some(RemoveParticipant) + let tx = Transaction::RemoveParticipant { participant, signed: Signed::default() }; + assert_eq!(tx.topic(), Some(Topic::RemoveParticipant { participant })); + + // DkgParticipation → None + let tx = Transaction::DkgParticipation { participation: vec![], signed: Signed::default() }; + assert_eq!(tx.topic(), None); + + // DkgConfirmationPreprocess → DkgConfirmation with Preprocess round + let tx = Transaction::DkgConfirmationPreprocess { + attempt: 5, + preprocess: [0; 64], + signed: Signed::default(), + }; + assert_eq!( + tx.topic(), + Some(Topic::DkgConfirmation { attempt: 5, round: SigningProtocolRound::Preprocess }) + ); + + // DkgConfirmationShare → DkgConfirmation with Share round + let tx = + Transaction::DkgConfirmationShare { attempt: 3, share: [0; 32], signed: Signed::default() }; assert_eq!( - default_signed.signature, - SchnorrSignature { R: identity, s: ::F::ZERO } + tx.topic(), + Some(Topic::DkgConfirmation { attempt: 3, round: SigningProtocolRound::Share }) ); + + // Provided transactions → None + for tx in all_provided_transactions() { + assert_eq!(tx.topic(), None, "Provided tx should have no topic: {tx:?}"); + } + + // Sign → Topic::Sign preserving all fields + let id = VariantSignId::Batch([9; 32]); + let tx = Transaction::Sign { + id, + attempt: 2, + round: SigningProtocolRound::Share, + data: vec![], + signed: Signed::default(), + }; + assert_eq!(tx.topic(), Some(Topic::Sign { id, attempt: 2, round: SigningProtocolRound::Share })); + + // SlashReport → Topic::SlashReport + let tx = Transaction::SlashReport { slash_points: vec![], signed: Signed::default() }; + assert_eq!(tx.topic(), Some(Topic::SlashReport)); } -#[test] -fn serialize_signed() { - let default_signed = Signed::default(); - let encoded = borsh::to_vec(&default_signed).unwrap(); - let decoded: Signed = borsh::from_slice(&encoded).unwrap(); - assert_eq!(decoded, default_signed); - - let signed = random_signed(&mut rand::rngs::OsRng); - let encoded = borsh::to_vec(&signed).unwrap(); - let decoded: Signed = borsh::from_slice(&encoded).unwrap(); - assert_eq!(decoded, signed); +mod sign { + use super::*; + + #[test] + fn tx_sign() { + let key = random_key(&mut OsRng); + let expected_signer = Ristretto::generator() * key.deref(); + let genesis = new_genesis(); + + // Sets correct signer and produces verifiable signature + for mut tx in all_signed_transactions() { + tx.sign(&mut OsRng, genesis, &key); + let sig_hash = tx.sig_hash(genesis); + + if let TransactionKind::Signed(_, trib_signed) = tx.kind() { + assert_eq!(trib_signed.signer, expected_signer, "Wrong signer for {tx:?}"); + assert!( + trib_signed.signature.verify(trib_signed.signer, sig_hash), + "Signature verification failed for {tx:?}" + ); + } + } + + // Wrong genesis fails verification + { + let mut tx = Transaction::RemoveParticipant { + participant: SeraiAddress([1; 32]), + signed: Signed::default(), + }; + tx.sign(&mut OsRng, new_genesis(), &key); + + let wrong_challenge = tx.sig_hash([1; 32]); + if let TransactionKind::Signed(_, trib_signed) = tx.kind() { + assert!( + !trib_signed.signature.verify(trib_signed.signer, wrong_challenge), + "Signature should not verify with wrong genesis" + ); + } + } + } + + #[test] + #[should_panic(expected = "signing Cosign transaction (provided)")] + fn sign_panics_on_cosign() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, new_genesis(), &key); + } + + #[test] + #[should_panic(expected = "signing Cosigned transaction (provided)")] + fn sign_panics_on_cosigned() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, new_genesis(), &key); + } + + #[test] + #[should_panic(expected = "signing SubstrateBlock transaction (provided)")] + fn sign_panics_on_substrate_block() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, new_genesis(), &key); + } + + #[test] + #[should_panic(expected = "signing Batch transaction (provided)")] + fn sign_panics_on_batch() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }; + tx.sign(&mut OsRng, new_genesis(), &key); + } } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 0092d7fd9..23450d7a6 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -77,7 +77,7 @@ impl Signed { } /// Provide a nonce to convert a `Signed` into a `tributary::Signed`. - fn to_tributary_signed(self, round: SigningProtocolRound) -> TributarySigned { + pub(crate) fn to_tributary_signed(self, round: SigningProtocolRound) -> TributarySigned { TributarySigned { signer: self.signer, nonce: round.nonce(), signature: self.signature } } } @@ -264,7 +264,7 @@ impl TransactionTrait for Transaction { Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(), - signed.to_tributary_signed(round), + signed.to_tributary_signed(*round), ), Transaction::SlashReport { signed, .. } => TransactionKind::Signed( From 608db12706334beaf80ec9e995e04bb1f6f2c350 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 2 Mar 2026 17:54:07 -0300 Subject: [PATCH 28/71] feat(tributary): add more db.rs tests --- coordinator/tributary/src/db.rs | 114 ++++---- coordinator/tributary/src/tests/db.rs | 355 +++++++++++++++++++++++++ coordinator/tributary/src/tests/mod.rs | 13 + 3 files changed, 427 insertions(+), 55 deletions(-) create mode 100644 coordinator/tributary/src/tests/mod.rs diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 10b4752ba..1f5fa4d3e 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -45,48 +45,78 @@ pub enum Topic { }, } -enum Participating { +#[derive(Debug, PartialEq)] +pub(crate) enum Participating { Participated, Everyone, } +pub(crate) fn required_participation(n: u16) -> Option { + n.checked_mul(2)?.checked_div(3)?.checked_add(1) +} + impl Topic { // The topic used by the next attempt of this protocol - fn next_attempt_topic(self) -> Option { + pub(crate) fn next_attempt_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, - Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { - attempt: attempt + 1, - round: SigningProtocolRound::Preprocess, - }), + Topic::DkgConfirmation { attempt, round: _ } => { + if let Some(next_attempt) = attempt.checked_add(1) { + Some(Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }) + } else { + None + } + } Topic::SlashReport => None, Topic::Sign { id, attempt, round: _ } => { - Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess }) + // checked_add here, sanity prevent infinite consecutive attempts + if let Some(next_attempt) = attempt.checked_add(1) { + Some(Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }) + } else { + None + } } } } // The topic for the re-attempt to schedule - fn reattempt_topic(self) -> Option<(u32, Topic)> { + pub(crate) fn reattempt_topic(self) -> Option<(u32, Topic)> { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { - let attempt = attempt + 1; - Some(( - attempt, - Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }, - )) + // checked_add here, sanity prevent infinite consecutive attempts + if let Some(next_attempt) = attempt.checked_add(1) { + Some(( + next_attempt, + Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }, + )) + } else { + None + } } SigningProtocolRound::Share => None, }, Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { - let attempt = attempt + 1; - Some((attempt, Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess })) + // checked_add here, sanity prevent infinite consecutive attempts + if let Some(next_attempt) = attempt.checked_add(1) { + Some(( + next_attempt, + Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, + )) + } else { + None + } } SigningProtocolRound::Share => None, }, @@ -136,7 +166,7 @@ impl Topic { /// The topic which precedes this topic as a prerequisite /// /// The preceding topic must define this topic as succeeding - fn preceding_topic(self) -> Option { + pub(crate) fn preceding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, @@ -159,7 +189,7 @@ impl Topic { /// The topic which succeeds this topic, with this topic as a prerequisite /// /// The succeeding topic must define this topic as preceding - fn succeeding_topic(self) -> Option { + pub(crate) fn succeeding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, @@ -194,13 +224,7 @@ impl Topic { } } - fn required_participation(&self, n: u16) -> u16 { - let _ = self; - // All of our topics require 2/3rds participation - ((2 * n) / 3) + 1 - } - - fn participating(&self) -> Participating { + pub(crate) fn participating(&self) -> Participating { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => Participating::Everyone, @@ -273,12 +297,6 @@ db_channel!( pub(crate) struct TributaryDb; impl TributaryDb { - pub(crate) fn last_handled_tributary_block( - getter: &impl Get, - set: ExternalValidatorSet, - ) -> Option<(u64, [u8; 32])> { - LastHandledTributaryBlock::get(getter, set) - } pub(crate) fn set_last_handled_tributary_block( txn: &mut impl DbTxn, set: ExternalValidatorSet, @@ -288,25 +306,6 @@ impl TributaryDb { LastHandledTributaryBlock::set(txn, set, &(block_number, block_hash)); } - pub(crate) fn latest_substrate_block_to_cosign( - getter: &impl Get, - set: ExternalValidatorSet, - ) -> Option { - LatestSubstrateBlockToCosign::get(getter, set) - } - pub(crate) fn set_latest_substrate_block_to_cosign( - txn: &mut impl DbTxn, - set: ExternalValidatorSet, - substrate_block_hash: BlockHash, - ) { - LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash); - } - pub(crate) fn actively_cosigning( - txn: &mut impl DbTxn, - set: ExternalValidatorSet, - ) -> Option { - ActivelyCosigning::get(txn, set) - } pub(crate) fn start_cosigning( txn: &mut impl DbTxn, set: ExternalValidatorSet, @@ -330,7 +329,7 @@ impl TributaryDb { ); } pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) { - assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning"); + ActivelyCosigning::take(txn, set).expect("finished cosigning but wasn't cosigning"); } pub(crate) fn mark_cosigned( txn: &mut impl DbTxn, @@ -352,7 +351,8 @@ impl TributaryDb { RecognizedTopics::send(txn, set, &topic); } pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { - AccumulatedWeight::get(getter, set, topic).is_some() + AccumulatedWeight::get(getter, set, topic).is_some() && + RecognizedTopics::peek(getter, set).is_some() } pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) { @@ -387,9 +387,9 @@ impl TributaryDb { txn: &mut impl DbTxn, set: ExternalValidatorSet, validator: SeraiAddress, - reason: &str, + #[cfg_attr(coverage, allow(unused_variables))] reason: &str, ) { - log::warn!("{validator} fatally slashed: {reason}"); + serai_log::warn!("{validator} fatally slashed: {reason}"); SlashPoints::set(txn, set, validator, &u32::MAX); } @@ -442,10 +442,14 @@ impl TributaryDb { } } + let Some(required_participation) = required_participation(total_weight) else { + return DataSet::None; + }; + // The complete lack of validation on the data by these NOPs opens the potential for spam here // If we've already accumulated past the threshold, NOP - if accumulated_weight >= topic.required_participation(total_weight) { + if accumulated_weight >= required_participation { return DataSet::None; } // If this is for an old attempt, NOP @@ -461,7 +465,7 @@ impl TributaryDb { Accumulated::set(txn, set, topic, validator, data); // Check if we now cross the weight threshold - if accumulated_weight >= topic.required_participation(total_weight) { + if accumulated_weight >= required_participation { // Queue this for re-attempt after enough time passes let reattempt_topic = topic.reattempt_topic(); if let Some((attempt, reattempt_topic)) = reattempt_topic { diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 950bd972b..799e76526 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,6 +1,7 @@ use rand::{RngCore, rngs::OsRng}; use serai_primitives::{ + address::SeraiAddress, network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, }; @@ -307,3 +308,357 @@ mod tributary_db { } } } + +#[test] +fn db_start_of_block() { + let _ = env_logger::try_init(); + let set = default_set(); + + let reattemptable_topics: Vec = all_topics() + .into_iter() + .filter_map(|t| t.reattempt_topic().map(|(_, reattempt_topic)| reattempt_topic)) + .collect(); + + serai_log::log::info!( + "db_start_of_block fuzz: reattemptable_topics={reattemptable_topics:?}, \ + all_topics count={}", + all_topics().len() + ); + + for iteration in 0 .. 100 { + for topic in all_topics() { + // Fresh DB per topic so recognized state doesn't leak between iterations + let mut db = MemDb::new(); + let block_number = OsRng.next_u64(); + let mut txn = db.txn(); + + // Randomly select which reattempt topics are queued for this block + let reattempts: Vec = + reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); + + serai_log::log::info!( + "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ + reattempts={reattempts:?}" + ); + + if !reattempts.is_empty() { + Reattempt::set(&mut txn, set, block_number, &reattempts); + serai_log::log::info!("set {} reattempt(s) for block {block_number}", reattempts.len()); + } + + TributaryDb::start_of_block(&mut txn, set, block_number); + + // Verify each queued reattempt topic was recognized and its message sent + for reattempt in &reattempts { + assert!(TributaryDb::recognized(&txn, set, *reattempt)); + if reattempt.sign_id(set).is_some() { + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + serai_log::log::info!("verified ProcessorMessage for {reattempt:?}"); + } else if reattempt.dkg_confirmation_sign_id(set).is_some() { + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + serai_log::log::info!("verified DkgConfirmationMessage for {reattempt:?}"); + } + } + + // When no reattempts were set, verify the current topic's reattempt was not recognized + if reattempts.is_empty() { + if let Some((_, reattempt_topic)) = topic.reattempt_topic() { + assert_eq!(TributaryDb::recognized(&txn, set, reattempt_topic), false); + serai_log::log::info!("verified {reattempt_topic:?} not recognized (no reattempts)"); + } + } + + // No extra messages should remain in either queue + assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + + txn.commit(); + } + } + + serai_log::log::info!("db_start_of_block fuzz: completed 100 iterations"); +} + +#[test] +fn db_fatal_slash() { + let mut db = MemDb::new(); + let set = default_set(); + let validator = random_serai_address(&mut OsRng); + + { + let mut txn = db.txn(); + TributaryDb::fatal_slash(&mut txn, set, validator, "test reason"); + txn.commit(); + } + + assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); + assert_eq!(SlashPoints::get(&db, set, validator), Some(u32::MAX)); +} + +mod fuzz { + use super::*; + use proptest::prelude::*; + + /// Verify all DB invariants after a single `TributaryDb::accumulate` call. + /// + /// Independently computes the expected DB state by tracing the code paths in `accumulate` + /// based on the inputs and pre-state, then asserts the actual DB matches. + #[expect(clippy::too_many_arguments)] + fn verify_accumulate_invariants( + db: &MemDb, + set: ExternalValidatorSet, + total_weight: u16, + block_number: u64, + topic: Topic, + validator: SeraiAddress, + validator_weight: u16, + data: &Vec, + pre_weight: Option, + pre_slashed: bool, + has_preceding_accumulated: bool, + has_next_topic_weight: bool, + result: &DataSet>, + ) { + let required = required_participation(total_weight); + let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); + let post_weight = AccumulatedWeight::get(db, set, topic); + + // Branch 1: Slash for participating in unrecognized topic requiring recognition. + if topic.requires_recognition() && pre_weight.is_none() { + assert!(post_slashed, "should be fatally slashed for unrecognized topic"); + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, None, "weight should remain None after recognition slash"); + assert!( + Accumulated::>::get(db, set, topic, validator).is_none(), + "no data should be stored after recognition slash" + ); + return; + } + + let weight_before = pre_weight.unwrap_or(0); + + // Branch 2: Slash for participating without completing the preceding topic. + if topic.preceding_topic().is_some() && !has_preceding_accumulated { + assert!(post_slashed, "should be fatally slashed for missing preceding participation"); + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, pre_weight, "weight unchanged after preceding slash"); + return; + } + + // Branch 3: required_participation overflows (total_weight > 32767). + let Some(required) = required else { + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, pre_weight, "weight unchanged when required_participation overflows"); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on overflow NOP"); + } + return; + }; + + // Branch 4: Already accumulated past the threshold - NOP. + if weight_before >= required { + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, Some(weight_before), "weight unchanged when past threshold"); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on threshold NOP"); + } + return; + } + + // Branch 5: Old attempt - the next attempt's topic already has weight. + let next_attempt_superseded = has_next_topic_weight && topic.next_attempt_topic().is_some(); + if next_attempt_superseded { + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, Some(weight_before), "weight unchanged for superseded attempt"); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on superseded NOP"); + } + return; + } + + // Accumulation happened (Branches 6 & 7) + let new_weight = weight_before + validator_weight; + assert_eq!(post_weight, Some(new_weight), "weight should reflect accumulation"); + + if !pre_slashed { + assert!(!post_slashed, "should not be slashed after valid accumulation"); + } + + if new_weight >= required { + // Branch 7: Threshold crossed. + + // 7a: Reattempt should be queued if topic is reattemptable. + if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { + #[cfg(not(feature = "longer-reattempts"))] + const BASE_REATTEMPT_DELAY: u32 = + (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + #[cfg(feature = "longer-reattempts")] + const BASE_REATTEMPT_DELAY: u32 = + (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + + let blocks_till = u64::from(reattempt_attempt * BASE_REATTEMPT_DELAY); + let recognize_at = block_number + blocks_till; + + let queued = Reattempt::get(db, set, recognize_at); + assert!(queued.is_some(), "reattempt should be queued at block {recognize_at}"); + assert!( + queued.unwrap().contains(&reattempt_topic), + "reattempt queue should contain {reattempt_topic:?}" + ); + } + + // 7b: Succeeding topic should be recognized (weight set to 0). + if let Some(succeeding) = topic.succeeding_topic() { + assert_eq!( + AccumulatedWeight::get(db, set, succeeding), + Some(0), + "succeeding topic should be recognized with weight=0" + ); + } + + // 7c: Accumulated data cleanup depends on whether a reattempt exists. + // When no reattempt, the data is cleaned up in the collection loop. + let has_reattempt = topic.reattempt_topic().is_some(); + if has_reattempt { + assert_eq!( + Accumulated::>::get(db, set, topic, validator), + Some(data.clone()), + "data should be preserved when reattempt exists" + ); + } else { + assert!( + Accumulated::>::get(db, set, topic, validator).is_none(), + "data should be cleaned up when no reattempt" + ); + } + + // 7d: Result should be DataSet::Participating (validator just accumulated). + match result { + DataSet::Participating(data_set) => { + assert!(data_set.contains_key(&validator), "validator should be in result data set"); + assert_eq!(data_set.get(&validator).unwrap(), data, "result data should match input"); + } + DataSet::None => { + panic!( + "result should be Participating when threshold crossed by participating validator" + ); + } + } + } else { + // Branch 6: Below threshold - data stored, result is None. + assert!(matches!(result, DataSet::None), "result should be None when below threshold"); + assert_eq!( + Accumulated::>::get(db, set, topic, validator), + Some(data.clone()), + "accumulated data should be stored" + ); + } + } + + proptest! { + #![proptest_config(ProptestConfig::with_cases(1000))] + + #[test] + fn fuzz_accumulate( + has_initial_weight in any::(), + initial_weight in 0u16..u16::MAX, + total_weight in 1u16..u16::MAX, + + has_next_topic_weight in any::(), + next_topic_initial_weight in 0u16..u16::MAX, + + has_preceding_topic_accumulated in any::(), + + topic_variant in 0u8..5, + attempt in 0u32..100, + round in 0u8..2, + cosign_block in any::(), + batch_id in any::<[u8; 32]>(), + validator_weight in 1u16..u16::MAX, + block_number in 1u64..u64::MAX, + data in prop::collection::vec(any::(), 0..64), + + num_validators in 1u16..u16::MAX, + cur_validator in 0u16..u16::MAX, + ) { + let round = + if round == 0 { SigningProtocolRound::Preprocess } else { SigningProtocolRound::Share }; + + let topic = match topic_variant % 5 { + 0 => Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + 1 => Topic::DkgConfirmation { attempt: attempt % 100, round }, + 2 => Topic::SlashReport, + 3 => Topic::Sign { + id: VariantSignId::Cosign(cosign_block), + attempt: attempt % 100, + round, + }, + _ => { + Topic::Sign { id: VariantSignId::Batch(batch_id), attempt: attempt % 100, round } + } + }; + + let mut db = MemDb::new(); + let set = default_set(); + + let validators: Vec = + (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); + + let validator_weight = validator_weight.min(total_weight).max(1); + + let mut txn = db.txn(); + + if has_initial_weight { + AccumulatedWeight::set(&mut txn, set, topic, &initial_weight); + } + + if has_next_topic_weight { + if let Some(next_attempt_topic) = topic.next_attempt_topic() { + AccumulatedWeight::set(&mut txn, set, next_attempt_topic, &next_topic_initial_weight); + } + } + + let cur_validator = (cur_validator as usize) % validators.len(); + let validator = validators[cur_validator]; + + if has_preceding_topic_accumulated { + if let Some(preceding_topic) = topic.preceding_topic() { + Accumulated::set(&mut txn, set, preceding_topic, validator, &data) + } + } + + let pre_weight = AccumulatedWeight::get(&txn, set, topic); + let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); + + let result = TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + ); + + txn.commit(); + + verify_accumulate_invariants( + &db, + set, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + pre_weight, + pre_slashed, + has_preceding_topic_accumulated, + has_next_topic_weight, + &result, + ); + } + } +} diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs new file mode 100644 index 000000000..8f9f28c63 --- /dev/null +++ b/coordinator/tributary/src/tests/mod.rs @@ -0,0 +1,13 @@ +use serai_primitives::{ + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, +}; + +pub mod transaction; +pub mod db; + +pub(crate) fn default_test_set() -> ExternalValidatorSet { + // The external validator set does not alter or affect the behavior of the functions being tested + // this can be used just as a default value any time + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } +} From 0905cb6996f51f916a8f385816bd78f4ab61c2fa Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 3 Mar 2026 16:41:35 -0300 Subject: [PATCH 29/71] feat(tributary): add initial scan_block.rs tests Identified bugs: - panic in Accumulated::::get - panic in assert!(slash_report.len() <= f); --- coordinator/tributary/Cargo.toml | 8 + coordinator/tributary/src/db.rs | 29 +- coordinator/tributary/src/lib.rs | 5 +- coordinator/tributary/src/tests/db.rs | 239 ++- coordinator/tributary/src/tests/mod.rs | 3 +- coordinator/tributary/src/tests/scan_block.rs | 1309 +++++++++++++++++ tests/substrate/Cargo.toml | 7 + tests/substrate/src/lib.rs | 15 + 8 files changed, 1577 insertions(+), 38 deletions(-) create mode 100644 coordinator/tributary/src/tests/scan_block.rs diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index 333f39aeb..4c5779f23 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -43,5 +43,13 @@ messages = { package = "serai-processor-messages", path = "../../processor/messa serai-log = { path = "../../common/log", version = "0.1.0" } +[dev-dependencies] +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +rand = { version = "0.8", default-features = false, features = ["std"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } +proptest = "1" +tributary-sdk = { path = "../tributary-sdk", features = ["tests"] } +serai-substrate-tests = { path = "../../tests/substrate" } + [features] longer-reattempts = [] diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 1f5fa4d3e..12b291787 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -297,6 +297,12 @@ db_channel!( pub(crate) struct TributaryDb; impl TributaryDb { + pub(crate) fn last_handled_tributary_block( + getter: &impl Get, + set: ExternalValidatorSet, + ) -> Option<(u64, [u8; 32])> { + LastHandledTributaryBlock::get(getter, set) + } pub(crate) fn set_last_handled_tributary_block( txn: &mut impl DbTxn, set: ExternalValidatorSet, @@ -306,6 +312,25 @@ impl TributaryDb { LastHandledTributaryBlock::set(txn, set, &(block_number, block_hash)); } + pub(crate) fn latest_substrate_block_to_cosign( + getter: &impl Get, + set: ExternalValidatorSet, + ) -> Option { + LatestSubstrateBlockToCosign::get(getter, set) + } + pub(crate) fn set_latest_substrate_block_to_cosign( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: BlockHash, + ) { + LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash); + } + pub(crate) fn actively_cosigning( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + ActivelyCosigning::get(txn, set) + } pub(crate) fn start_cosigning( txn: &mut impl DbTxn, set: ExternalValidatorSet, @@ -431,7 +456,9 @@ impl TributaryDb { // Check if there's a preceding topic, this validator participated let preceding_topic = topic.preceding_topic(); if let Some(preceding_topic) = preceding_topic { - if Accumulated::::get(txn, set, preceding_topic, validator).is_none() { + // Use a raw key-existence check instead of `Accumulated::::get` because the preceding + // topic may have stored a different type (e.g. preprocess is [u8; 64], share is [u8; 32]) + if txn.get(Accumulated::::key(set, preceding_topic, validator)).is_none() { Self::fatal_slash( txn, set, diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 5f63bb08a..0413fbe1e 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -472,13 +472,14 @@ impl ScanBlock<'_, TD, TDT, P> { } let amortized_slash_report = median_slash_report; - // Create the resulting slash report + // Create the resulting slash report, only including validators who have non-zero + // slash points after amortization let mut slash_report = vec![]; for points in amortized_slash_report { // TODO: Natively store this as a `Slash` if points == u32::MAX { slash_report.push(Slash::Fatal); - } else { + } else if points > 0 { slash_report.push(Slash::Points(points)); } } diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 799e76526..2951c203a 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,23 +1,13 @@ use rand::{RngCore, rngs::OsRng}; -use serai_primitives::{ - address::SeraiAddress, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, -}; +use serai_primitives::{address::SeraiAddress, validator_sets::ExternalValidatorSet}; use messages::sign::VariantSignId; use serai_db::{Db, DbTxn, MemDb}; use serai_substrate_tests::random_serai_address; -use crate::{db::*, transaction::SigningProtocolRound}; - -fn default_set() -> ExternalValidatorSet { - // The external validator set does not change any functionality that is being tested - // use this as default - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } -} +use crate::{db::*, transaction::SigningProtocolRound, tests::default_test_validator_set}; fn all_topics() -> Vec { vec![ @@ -130,7 +120,7 @@ mod topic { #[test] fn sign_id() { - let set = default_set(); + let set = default_test_validator_set(); for topic in all_topics() { match topic { Topic::Sign { id, attempt, round: _ } => { @@ -143,7 +133,7 @@ mod topic { #[test] fn dkg_confirmation_sign_id() { - let set = default_set(); + let set = default_test_validator_set(); for topic in all_topics() { match topic { Topic::DkgConfirmation { attempt, round: _ } => assert_eq!( @@ -227,7 +217,7 @@ mod tributary_db { #[test] fn start_cosigning() { let mut db = MemDb::new(); - let set = default_set(); + let set = default_test_validator_set(); let block_hash1 = random_block_hash(&mut OsRng); let block_number1 = OsRng.next_u64(); @@ -312,7 +302,7 @@ mod tributary_db { #[test] fn db_start_of_block() { let _ = env_logger::try_init(); - let set = default_set(); + let set = default_test_validator_set(); let reattemptable_topics: Vec = all_topics() .into_iter() @@ -382,7 +372,7 @@ fn db_start_of_block() { #[test] fn db_fatal_slash() { let mut db = MemDb::new(); - let set = default_set(); + let set = default_test_validator_set(); let validator = random_serai_address(&mut OsRng); { @@ -395,6 +385,150 @@ fn db_fatal_slash() { assert_eq!(SlashPoints::get(&db, set, validator), Some(u32::MAX)); } +/// Tests for the preceding topic existence check in `TributaryDb::accumulate`. +mod accumulate_preceding_topic { + use super::*; + + /// Set up a DkgConfirmation Share topic (which has a Preprocess preceding topic), + /// with 3 validators of weight 1 each so `required_participation = 3`. + fn setup() -> (ExternalValidatorSet, Vec, u16, Topic, Topic, SeraiAddress) { + let set = default_test_validator_set(); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let total_weight = 3u16; + + let share_topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }; + let preprocess_topic = + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + + let validator = validators[0]; + (set, validators, total_weight, share_topic, preprocess_topic, validator) + } + + #[test] + fn no_preceding_data_slashes_validator() { + let (set, validators, total_weight, share_topic, _preprocess_topic, validator) = setup(); + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + + // Recognize the share topic so the recognition check doesn't slash + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Do NOT store any preceding preprocess data - the existence check should fail + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + 1, + share_topic, + validator, + 1, + &[10u8; 32], + ); + txn.commit(); + + assert!(matches!(result, DataSet::None)); + } + + assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); + } + + #[test] + fn different_type_stored_in_preceding_topic_passes_existence_check() { + let (set, validators, total_weight, share_topic, preprocess_topic, validator) = setup(); + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + + // Recognize the share topic so the recognition check doesn't slash + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Store preceding preprocess data ([u8; 64]) + Accumulated::<[u8; 64]>::set(&mut txn, set, preprocess_topic, validator, &[1u8; 64]); + + // Accumulate a share ([u8; 32]) + // The preceding check should find the key despite the type mismatch and NOT slash. + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + OsRng.next_u64(), + share_topic, + validator, + 1, + &[2u8; 32], + ); + txn.commit(); + + // Below threshold (1 of 3) so result is None but data is stored + assert!(matches!(result, DataSet::None)); + } + + assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); + assert!(Accumulated::<[u8; 32]>::get(&db, set, share_topic, validator).is_some()); + } + + #[test] + fn same_type_stored_in_preceding_topic_still_works() { + let (set, validators, total_weight, _share_topic, _preprocess_topic, validator) = setup(); + + // Sign Share has a Sign Preprocess preceding topic, both use Vec> as D + let share_topic = Topic::Sign { + id: VariantSignId::Transaction([42; 32]), + attempt: 0, + round: SigningProtocolRound::Share, + }; + let preprocess_topic = Topic::Sign { + id: VariantSignId::Transaction([42; 32]), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + + // Recognize both topics + TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Store preceding data with the same type as share will use + let preprocess_data: Vec> = vec![vec![1, 2, 3]]; + Accumulated::set(&mut txn, set, preprocess_topic, validator, &preprocess_data); + + let share_data: Vec> = vec![vec![4, 5, 6]]; + let result = TributaryDb::accumulate::>>( + &mut txn, + set, + &validators, + total_weight, + OsRng.next_u64(), + share_topic, + validator, + 1, + &share_data, + ); + txn.commit(); + + assert!(matches!(result, DataSet::None)); + assert_eq!( + Accumulated::>>::get(&db, set, share_topic, validator), + Some(share_data) + ); + } + + assert!(!TributaryDb::is_fatally_slashed(&db, set, validator)); + } +} + mod fuzz { use super::*; use proptest::prelude::*; @@ -417,6 +551,7 @@ mod fuzz { pre_slashed: bool, has_preceding_accumulated: bool, has_next_topic_weight: bool, + validator_in_list: bool, result: &DataSet>, ) { let required = required_participation(total_weight); @@ -458,7 +593,7 @@ mod fuzz { // Branch 4: Already accumulated past the threshold - NOP. if weight_before >= required { assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, Some(weight_before), "weight unchanged when past threshold"); + assert_eq!(post_weight, pre_weight, "weight unchanged when past threshold"); if !pre_slashed { assert!(!post_slashed, "should not be slashed on threshold NOP"); } @@ -466,10 +601,11 @@ mod fuzz { } // Branch 5: Old attempt - the next attempt's topic already has weight. + // Note: pre_weight may be None (topic not yet recognized) which is preserved. let next_attempt_superseded = has_next_topic_weight && topic.next_attempt_topic().is_some(); if next_attempt_superseded { assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, Some(weight_before), "weight unchanged for superseded attempt"); + assert_eq!(post_weight, pre_weight, "weight unchanged for superseded attempt"); if !pre_slashed { assert!(!post_slashed, "should not be slashed on superseded NOP"); } @@ -517,31 +653,57 @@ mod fuzz { } // 7c: Accumulated data cleanup depends on whether a reattempt exists. - // When no reattempt, the data is cleaned up in the collection loop. + // The cleanup loop only iterates the `validators` slice, so data for a validator + // not in the list is never deleted regardless of reattempt status. let has_reattempt = topic.reattempt_topic().is_some(); - if has_reattempt { + if has_reattempt || !validator_in_list { assert_eq!( Accumulated::>::get(db, set, topic, validator), Some(data.clone()), - "data should be preserved when reattempt exists" + "data should be preserved (reattempt={has_reattempt}, in_list={validator_in_list})" ); } else { assert!( Accumulated::>::get(db, set, topic, validator).is_none(), - "data should be cleaned up when no reattempt" + "data should be cleaned up when no reattempt and validator in list" ); } - // 7d: Result should be DataSet::Participating (validator just accumulated). - match result { - DataSet::Participating(data_set) => { - assert!(data_set.contains_key(&validator), "validator should be in result data set"); - assert_eq!(data_set.get(&validator).unwrap(), data, "result data should match input"); + // 7d: Result depends on whether the validator was in the collection list. + // The collection loop only gathers data from the `validators` slice. + // `participated` = data_set.contains_key(&validator), which is false when + // the validator is not in the slice. + if validator_in_list { + match result { + DataSet::Participating(data_set) => { + assert!(data_set.contains_key(&validator), "validator should be in result data set"); + assert_eq!(data_set.get(&validator).unwrap(), data, "result data should match input"); + } + DataSet::None => { + panic!("result should be Participating when threshold crossed by listed validator"); + } } - DataSet::None => { - panic!( - "result should be Participating when threshold crossed by participating validator" - ); + } else { + match topic.participating() { + Participating::Participated => { + // Validator accumulated but isn't in the list, so participated=false + assert!(matches!(result, DataSet::None), "Participated + not in list => None"); + } + Participating::Everyone => { + // Everyone always returns Participating, but the validator's data won't + // be in the set (it was only collected from the validators slice) + match result { + DataSet::Participating(data_set) => { + assert!( + !data_set.contains_key(&validator), + "validator not in list should not appear in data set" + ); + } + DataSet::None => { + panic!("Everyone topics always return Participating"); + } + } + } } } } else { @@ -580,6 +742,7 @@ mod fuzz { num_validators in 1u16..u16::MAX, cur_validator in 0u16..u16::MAX, + validator_in_list in any::(), ) { let round = if round == 0 { SigningProtocolRound::Preprocess } else { SigningProtocolRound::Share }; @@ -599,7 +762,7 @@ mod fuzz { }; let mut db = MemDb::new(); - let set = default_set(); + let set = default_test_validator_set(); let validators: Vec = (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); @@ -618,8 +781,15 @@ mod fuzz { } } + // When validator_in_list is false, the accumulating validator is an outsider + // not present in the validators slice. This exercises the `participated = false` + // branch when the threshold is crossed. let cur_validator = (cur_validator as usize) % validators.len(); - let validator = validators[cur_validator]; + let validator = if validator_in_list { + validators[cur_validator] + } else { + random_serai_address(&mut OsRng) + }; if has_preceding_topic_accumulated { if let Some(preceding_topic) = topic.preceding_topic() { @@ -657,6 +827,7 @@ mod fuzz { pre_slashed, has_preceding_topic_accumulated, has_next_topic_weight, + validator_in_list, &result, ); } diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 8f9f28c63..5cb667131 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -5,8 +5,9 @@ use serai_primitives::{ pub mod transaction; pub mod db; +pub mod scan_block; -pub(crate) fn default_test_set() -> ExternalValidatorSet { +pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { // The external validator set does not alter or affect the behavior of the functions being tested // this can be used just as a default value any time ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs new file mode 100644 index 000000000..101fc284e --- /dev/null +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -0,0 +1,1309 @@ +use core::marker::PhantomData; +use std::collections::HashMap; +use rand::{Rng, RngCore}; +use rand_core::OsRng; +use serai_substrate_tests::{random_block_hash, random_serai_address}; + +use ciphersuite::group::{GroupEncoding}; +use dalek_ff_group::RistrettoPoint; + +use serai_primitives::address::SeraiAddress; + +use messages::sign::VariantSignId; + +use dkg::Participant; + +use serai_db::{Db, DbTxn, MemDb}; + +use serai_cosign_types::CosignIntent; +use serai_coordinator_substrate::NewSetInformation; + +use tributary_sdk::{Block, BlockHeader, Transaction as TributaryTransaction, P2p}; + +use crate::{ + db::{ + AccumulatedWeight, ActivelyCosigning, CosignIntents as DbCosignIntents, + LatestSubstrateBlockToCosign, Topic, TributaryDb, + }, + tests::default_test_validator_set, +}; +use crate::transaction::{SigningProtocolRound, Signed, Transaction}; +use crate::{CosignIntents, DkgConfirmationMessages, ProcessorMessages, ScanBlock, SubstrateBlockPlans}; + +#[derive(Clone)] +struct MockP2p; +impl P2p for MockP2p { + fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + core::future::Future { + async move { unimplemented!() } + } +} + +fn get_test_validators_and_weights_setup( +) -> (Vec<(SeraiAddress, u16)>, Vec, HashMap, u16) { + let validator_data = vec![ + (random_serai_address(&mut OsRng), 1u16), + (random_serai_address(&mut OsRng), 1), + (random_serai_address(&mut OsRng), 1), + ]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + + let mut weights = HashMap::new(); + for (address, weight) in &validator_data { + weights.insert(*address, *weight); + } + + (validator_data, validators, weights, 3) +} + +fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { + let mut participant_indexes = HashMap::new(); + let mut reverse_lookup = HashMap::new(); + let mut i = 1u16; + for (address, weight) in validators { + let mut indices = Vec::new(); + for _ in 0 .. *weight { + let p = Participant::new(i).unwrap(); + indices.push(p); + reverse_lookup.insert(p, *address); + i += 1; + } + participant_indexes.insert(*address, indices); + } + + NewSetInformation { + set: default_test_validator_set(), + serai_block: random_block_hash(&mut OsRng).0, + declaration_time: OsRng.next_u64(), + threshold: OsRng.gen_range(0 ..= u16::MAX), + validators: validators.to_vec(), + evrf_public_keys: vec![], + participant_indexes, + participant_indexes_reverse_lookup: reverse_lookup, + } +} + +fn make_scan_block<'a, TDT: DbTxn>( + txn: &'a mut TDT, + set_info: &'a NewSetInformation, + validators: &'a [SeraiAddress], + total_weight: u16, + validator_weights: &'a HashMap, +) -> ScanBlock<'a, MemDb, TDT, MockP2p> { + ScanBlock { + _td: PhantomData, + _p2p: PhantomData, + tributary_txn: txn, + set: set_info, + validators, + total_weight, + validator_weights, + } +} + +/// Generate a random Ristretto key and the corresponding SeraiAddress. +fn random_validator_key() -> (RistrettoPoint, SeraiAddress) { + let key = RistrettoPoint::random(&mut OsRng); + let address = SeraiAddress(key.to_bytes()); + (key, address) +} + +/// Create a Signed with the given signer key and a dummy signature. +fn make_signed(signer: RistrettoPoint) -> Signed { + Signed { signer, ..Signed::default() } +} + +mod potentially_start_cosign { + use super::*; + + #[test] + fn potentially_start_cosign() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + // No TributaryDb::latest_substrate_block_to_cosign block: no-op + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + } + + // Already cosigning: should not replace the actively cosigning block + { + let mut db = MemDb::new(); + let initial_block_hash = random_block_hash(&mut OsRng); + + { + let mut txn = db.txn(); + TributaryDb::start_cosigning(&mut txn, set, initial_block_hash, OsRng.next_u64()); + let new_block_hash = random_block_hash(&mut OsRng); + LatestSubstrateBlockToCosign::set(&mut txn, set, &new_block_hash); + txn.commit(); + } + + let mut txn = db.txn(); + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + + // Did not replace initial_block_hash for new_block_hash + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(initial_block_hash)); + } + + // Already cosigned: no-op + { + let mut db = MemDb::new(); + let initial_block_hash = random_block_hash(&mut OsRng); + + { + let mut txn = db.txn(); + LatestSubstrateBlockToCosign::set(&mut txn, set, &initial_block_hash); + TributaryDb::mark_cosigned(&mut txn, set, initial_block_hash); + txn.commit(); + } + + let mut txn = db.txn(); + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + + assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + } + + // Ready to cosign: starts cosigning and sends processor message + { + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + let mut global_session = [0; 32]; + OsRng.fill_bytes(global_session.as_mut()); + + let intent = + CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; + + { + let mut txn = db.txn(); + LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); + CosignIntents::provide(&mut txn, set, &intent); + txn.commit(); + } + + let mut txn = db.txn(); + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + } + + #[test] + #[should_panic(expected = "provided CosignIntent wasn't saved by its block hash")] + fn potentially_start_cosign_panics_on_differing_intent_blockhash() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + let mut global_session = [0; 32]; + OsRng.fill_bytes(global_session.as_mut()); + + { + let mut txn = db.txn(); + LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); + + let new_block_hash = random_block_hash(&mut OsRng); + DbCosignIntents::set( + &mut txn, + set, + // Store the intent under block_hash (the key `CosignIntents::take` will look up) + block_hash, + &CosignIntent { + global_session, + block_number: OsRng.next_u64(), + // but the intent's block_hash field is a new_block_hash + block_hash: new_block_hash, // triggering the assert_eq!(intent.block_hash, latest_substrate_block_to_cosign) panic + notable: false, + }, + ); + txn.commit(); + } + + { + let mut txn = db.txn(); + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + } +} + +#[test] +fn accumulate_dkg_confirmation() { + // Use 3 validators with weight 1 each so threshold math is deterministic: + // total_weight = 3, required_participation = 3 * 2 = 6 / 3 = 2 + 1 = 3 + let v0 = random_serai_address(&mut OsRng); + let v1 = random_serai_address(&mut OsRng); + let v2 = random_serai_address(&mut OsRng); + let validator_data = vec![(v0, 1u16), (v1, 1), (v2, 1)]; + let validators = vec![v0, v1, v2]; + let weights: HashMap = validator_data.iter().copied().collect(); + let total_weight = 3u16; + let set_info = new_test_set_info(&validator_data); + let set = set_info.set; + let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + + // Below threshold: returns None until enough weight accumulates + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + let block_number = OsRng.next_u64(); + + let mut data0 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data0); + + assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data0, v0).is_none()); + + let mut data1 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data1); + + assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data1, v1).is_none()); + + txn.commit(); + } + + // Threshold crossed: third accumulation returns SignId + correctly mapped data + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let mut data0 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data0); + let mut data1 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data1); + let mut data2 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data2); + + let result; + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data0, v0).is_none()); + assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data1, v1).is_none()); + + result = scan_block.accumulate_dkg_confirmation(1, topic, &data2, v2); + } + let (sign_id, data_set) = result.unwrap(); + + // SignId must match what dkg_confirmation_sign_id produces + assert_eq!(sign_id, topic.dkg_confirmation_sign_id(set).unwrap()); + + // Participants are 1-indexed by list position, not by weight-based indices + assert_eq!(data_set.len(), 3); + assert_eq!(data_set[&Participant::new(1).unwrap()], data0); + assert_eq!(data_set[&Participant::new(2).unwrap()], data1); + assert_eq!(data_set[&Participant::new(3).unwrap()], data2); + } + + // Past threshold: further accumulations are no-ops + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let mut data0 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data0); + let mut data1 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data1); + let mut data2 = vec![0u8; 4]; + OsRng.fill_bytes(&mut data2); + let mut data_extra = vec![0u8; 4]; + OsRng.fill_bytes(&mut data_extra); + + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.accumulate_dkg_confirmation(1, topic, &data0, v0); + scan_block.accumulate_dkg_confirmation(1, topic, &data1, v1); + scan_block.accumulate_dkg_confirmation(1, topic, &data2, v2); + + // Already past threshold - this returns None + assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data_extra, v0).is_none()); + } + } +} + +mod handle_application_tx { + use super::*; + + #[test] + fn dont_handle_from_fatally_slashed() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); + + let mut db = MemDb::new(); + + // Don't handle transactions from those fatally slashed. + { + let mut txn = db.txn(); + TributaryDb::fatal_slash(&mut txn, set, default_signer, "test reason"); + txn.commit(); + } + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_application_tx( + OsRng.next_u64(), + Transaction::DkgParticipation { participation: vec![1, 2, 3], signed: Signed::default() }, + ); + + assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + } + + #[test] + fn handle_remove_participant_tx_type() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); + + // The signer is fatally slashed if the participant voted to be removed is nonexistent + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + let nonexistent = random_serai_address(&mut OsRng); + + scan_block.handle_application_tx( + OsRng.next_u64(), + Transaction::RemoveParticipant { participant: nonexistent, signed: Signed::default() }, + ); + + assert!(TributaryDb::is_fatally_slashed(&mut txn, set, default_signer)); + } + + // Valid RemoveParticipant with a signer who IS a validator accumulates weight + { + // Fresh db so the signer isn't fatally slashed from the sub-test above + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Generate a signer that's actually in the validator set + let (signer_key, signer_addr) = random_validator_key(); + let signer_weight = 1u16; + + let mut extended_validator_data = validator_data.clone(); + extended_validator_data.push((signer_addr, signer_weight)); + let extended_validators: Vec = + extended_validator_data.iter().map(|(a, _)| *a).collect(); + let mut extended_weights = weights.clone(); + extended_weights.insert(signer_addr, signer_weight); + let extended_total_weight = total_weight + signer_weight; + let extended_set_info = new_test_set_info(&extended_validator_data); + + let mut scan_block = make_scan_block( + &mut txn, + &extended_set_info, + &extended_validators, + extended_total_weight, + &extended_weights, + ); + + // Target one of the original validators (not the signer) + let target = validators[OsRng.gen_range(0 ..= validators.len() - 1)]; + + scan_block.handle_application_tx( + OsRng.next_u64(), + Transaction::RemoveParticipant { participant: target, signed: make_signed(signer_key) }, + ); + + assert!(AccumulatedWeight::get( + &mut txn, + set, + Topic::RemoveParticipant { participant: target } + ) + .is_some()); + } + + // When enough validators vote to remove a participant, the threshold is crossed + // and the participant is fatally slashed (DataSet::Participating branch) + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // All 3 validators need real keys so they can sign + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let target = addr0; + let block_number = OsRng.next_u64(); + + // First two votes accumulate but don't cross the threshold + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + scan_block.handle_application_tx( + block_number, + Transaction::RemoveParticipant { participant: target, signed: make_signed(key1) }, + ); + scan_block.handle_application_tx( + block_number, + Transaction::RemoveParticipant { participant: target, signed: make_signed(key2) }, + ); + } + assert!(!TributaryDb::is_fatally_slashed(&mut txn, set, target)); + + // Third vote crosses the threshold — target gets fatally slashed + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + scan_block.handle_application_tx( + block_number, + Transaction::RemoveParticipant { participant: target, signed: make_signed(key0) }, + ); + } + assert!(TributaryDb::is_fatally_slashed(&mut txn, set, target)); + } + } + + #[test] + fn handle_dkg_participation_tx_type() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + + // Use a real validator key so the signer exists in participant_indexes + let (signer_key, signer_addr) = random_validator_key(); + let validator_data = vec![ + (signer_addr, 1u16), + (random_serai_address(&mut OsRng), 1), + (random_serai_address(&mut OsRng), 1), + ]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + + scan_block.handle_application_tx( + OsRng.next_u64(), + Transaction::DkgParticipation { + participation: vec![1, 2, 3], + signed: make_signed(signer_key), + }, + ); + + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + + #[test] + fn handle_dkg_confirmation_preprocess_tx_type() { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // Below threshold: no DkgConfirmationMessages sent + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + + scan_block.handle_application_tx( + OsRng.next_u64(), + Transaction::DkgConfirmationPreprocess { + attempt: OsRng.next_u32(), + preprocess: [1u8; 64], + signed: make_signed(key0), + }, + ); + + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + } + + // Threshold crossed: sends DkgConfirmationMessages (Preprocesses) + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { + scan_block.handle_application_tx( + 1, + Transaction::DkgConfirmationPreprocess { + attempt: 0, + preprocess, + signed: make_signed(key), + }, + ); + } + } + + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + } + } + + #[test] + fn handle_dkg_confirmation_share_tx_type() { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (_, addr1) = random_validator_key(); + let (_, addr2) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // Share without preceding preprocess participation → fatal slash + // (the accumulate preceding_topic check slashes the signer) + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + + scan_block.handle_application_tx( + 1, + Transaction::DkgConfirmationShare { + attempt: 0, + share: [10u8; 32], + signed: make_signed(key0), + }, + ); + + assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + } + } + + /// Verify that the full preprocess→share flow works for DkgConfirmation. + /// + /// Previously, this panicked because `accumulate<[u8; 32]>` (share) used typed deserialization + /// on the preceding preprocess topic stored as `[u8; 64]`. Fixed by using a raw key-existence + /// check for the preceding topic instead. + #[test] + fn dkg_confirmation_preprocess_then_share_flow() { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // All 3 validators submit preprocesses (threshold crossed → DkgConfirmationMessages sent) + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { + scan_block.handle_application_tx( + 1, + Transaction::DkgConfirmationPreprocess { + attempt: 0, + preprocess, + signed: make_signed(key), + }, + ); + } + } + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + + // All 3 validators submit shares (threshold crossed → DkgConfirmationMessages sent) + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + for (key, share) in [(key0, [10u8; 32]), (key1, [20u8; 32]), (key2, [30u8; 32])] { + scan_block.handle_application_tx( + 1, + Transaction::DkgConfirmationShare { attempt: 0, share, signed: make_signed(key) }, + ); + } + } + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + } + + #[test] + fn handle_cosign_tx_type() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let block_hash = random_block_hash(&mut OsRng); + let mut global_session = [0u8; 32]; + OsRng.fill_bytes(&mut global_session); + + let intent = + CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; + + // Sets LatestSubstrateBlockToCosign and starts cosigning + { + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + CosignIntents::provide(&mut txn, set, &intent); + txn.commit(); + } + + let mut txn = db.txn(); + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_application_tx(1, Transaction::Cosign { substrate_block_hash: block_hash }); + + assert_eq!(LatestSubstrateBlockToCosign::get(&mut txn, set), Some(block_hash)); + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + + // When already cosigning, updates LatestSubstrateBlockToCosign but doesn't replace active + { + let mut db = MemDb::new(); + let first_hash = random_block_hash(&mut OsRng); + let second_hash = random_block_hash(&mut OsRng); + + { + let mut txn = db.txn(); + TributaryDb::start_cosigning(&mut txn, set, first_hash, 1); + txn.commit(); + } + + let mut txn = db.txn(); + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block + .handle_application_tx(1, Transaction::Cosign { substrate_block_hash: second_hash }); + + assert_eq!(LatestSubstrateBlockToCosign::get(&mut txn, set), Some(second_hash)); + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(first_hash)); + } + } + + #[test] + fn handle_cosigned_tx_type() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + // Marks block as cosigned + { + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + let mut txn = db.txn(); + + assert!(!TributaryDb::cosigned(&mut txn, set, block_hash)); + + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block + .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); + + assert!(TributaryDb::cosigned(&mut txn, set, block_hash)); + } + + // Finishes active cosign when matching block + { + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + + { + let mut txn = db.txn(); + TributaryDb::start_cosigning(&mut txn, set, block_hash, 1); + txn.commit(); + } + + let mut txn = db.txn(); + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); + + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block + .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); + + assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + } + + // Does not finish active cosign when block doesn't match + { + let mut db = MemDb::new(); + let active_hash = random_block_hash(&mut OsRng); + let other_hash = random_block_hash(&mut OsRng); + + { + let mut txn = db.txn(); + TributaryDb::start_cosigning(&mut txn, set, active_hash, 1); + txn.commit(); + } + + let mut txn = db.txn(); + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block + .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: other_hash }); + + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(active_hash)); + assert!(TributaryDb::cosigned(&mut txn, set, other_hash)); + } + } + + #[test] + fn handle_substrate_block_tx_type() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + let plans = vec![[10u8; 32], [20u8; 32]]; + + { + let mut txn = db.txn(); + SubstrateBlockPlans::set(&mut txn, set, block_hash, &plans); + txn.commit(); + } + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_application_tx(1, Transaction::SubstrateBlock { hash: block_hash }); + + for plan in &plans { + let topic = Topic::Sign { + id: VariantSignId::Transaction(*plan), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); + } + } + + #[test] + fn handle_batch_tx_type() { + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let batch_hash = [42u8; 32]; + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_application_tx(1, Transaction::Batch { hash: batch_hash }); + + let topic = Topic::Sign { + id: VariantSignId::Batch(batch_hash), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); + } + + #[test] + fn handle_sign_tx_type() { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let sign_id = VariantSignId::Transaction([42; 32]); + let topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Preprocess }; + + // Wrong data length: signer has weight 1 but submits 2 entries → fatal slash + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + scan_block.handle_application_tx( + 1, + Transaction::Sign { + id: sign_id, + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![1], vec![2]], + signed: make_signed(key0), + }, + ); + + assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + } + + // Valid data: threshold crossing sends ProcessorMessage + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + for key in [key0, key1, key2] { + scan_block.handle_application_tx( + 1, + Transaction::Sign { + id: sign_id, + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![1, 2, 3]], + signed: make_signed(key), + }, + ); + } + } + + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + } + + #[test] + fn handle_slash_report_tx_type() { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (_, addr1) = random_validator_key(); + let (_, addr2) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // Wrong length: 3 validators but only 2 slash points → fatal slash + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { slash_points: vec![0, 0], signed: make_signed(key0) }, + ); + + assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + } + + // Valid length: accumulates weight + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { slash_points: vec![0, 0, 0], signed: make_signed(key0) }, + ); + + assert!(AccumulatedWeight::get(&mut txn, set, Topic::SlashReport).is_some()); + } + + // Threshold crossed: computes median slash report and sends SignSlashReport message. + // Uses 4 validators so f = (4-1)/3 = 1, allowing up to 1 slashed validator. + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + let (_, addr3) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // Each reporter says: first 3 validators have 0 points, 4th has 100 + // required_participation = 4*2/3+1 = 3, so 3 submissions cross the threshold + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 4, &weights); + for key in [key0, key1, key2] { + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: make_signed(key) }, + ); + } + } + + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + } + + mod fuzz_slash_report { + use super::*; + use proptest::prelude::*; + + /// Independently compute the expected slash report that `handle_application_tx` should + /// produce when `DataSet::Participating` is reached, mirroring the production logic. + /// + /// Returns `None` if `f == 0` (the slash report would be empty and nothing is sent). + fn expected_slash_report(num_validators: usize, reports: &[Vec]) -> Option> { + let f = (num_validators - 1) / 3; + if f == 0 { + return None; + } + + // Compute the median for each validator position across all reporters + let mut medians = Vec::with_capacity(num_validators); + for i in 0 .. num_validators { + let mut values: Vec = reports.iter().map(|r| r[i]).collect(); + values.sort_unstable(); + let median_index = + if (values.len() % 2) == 1 { values.len() / 2 } else { (values.len() / 2) - 1 }; + medians.push(values[median_index]); + } + + // Find worst validator in the supermajority and amortize + let mut sorted = medians.clone(); + sorted.sort_unstable(); + let amortization = sorted[num_validators - f - 1]; + + let amortized: Vec = medians.iter().map(|p| p.saturating_sub(amortization)).collect(); + + // Filter to non-zero entries only + let result: Vec = amortized.into_iter().filter(|&p| p > 0).collect(); + Some(result) + } + + /// Generate `count` slash report vectors, each of length `num_validators`. + /// Values are drawn from a small set including 0, small values, large values, and u32::MAX + /// to exercise the Fatal/Points/zero filtering paths. + fn slash_points_strategy( + num_validators: usize, + count: usize, + ) -> impl Strategy>> { + let values = prop::collection::vec( + prop_oneof![ + 3 => Just(0u32), + 3 => 1..100u32, + 2 => 100..10_000u32, + 1 => Just(u32::MAX), + ], + num_validators, + ); + prop::collection::vec(values, count) + } + + proptest! { + #![proptest_config(ProptestConfig::with_cases(200))] + + /// Fuzz the SlashReport → Participating path with randomized slash point vectors. + /// + /// Uses 4 validators (f=1) so the threshold-crossing path is reachable. + /// All 4 submit identical reports so the median equals the input. + #[test] + fn fuzz_slash_report_participating_4_validators( + slash_points in slash_points_strategy(4, 1).prop_map(|mut v| v.remove(0)), + ) { + let set = default_test_validator_set(); + + let (key0, addr0) = random_validator_key(); + let (key1, addr1) = random_validator_key(); + let (key2, addr2) = random_validator_key(); + let (key3, addr3) = random_validator_key(); + let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // All 4 validators submit the same slash_points + // required_participation = 4*2/3+1 = 3, so 3 cross the threshold. + // The 4th submission is a NOP (past threshold). + let reports: Vec> = vec![slash_points.clone(); 3]; + let expected = expected_slash_report(4, &reports); + + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 4, &weights); + for (key, _) in [(key0, &addr0), (key1, &addr1), (key2, &addr2), (key3, &addr3)] { + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { + slash_points: slash_points.clone(), + signed: make_signed(key), + }, + ); + } + } + + match expected { + Some(result) if !result.is_empty() => { + // Non-empty slash report → message should be sent + prop_assert!( + ProcessorMessages::try_recv(&mut txn, set).is_some(), + "expected ProcessorMessage for non-empty slash report {:?}", + result + ); + } + _ => { + // Empty or f==0 → no message sent (slash report is empty, nothing to sign) + // The code still sends the message even for empty reports due to the assert + // passing with len=0 <= f. Verify it gets sent regardless. + // + // With our fix, Points(0) are filtered, so if all amortized values are 0, + // the slash_report is empty. The assert passes (0 <= f=1), and the code still + // recognizes the topic and sends the message. + let msg = ProcessorMessages::try_recv(&mut txn, set); + // The handler always sends a message when Participating is reached + prop_assert!(msg.is_some(), "expected ProcessorMessage even for empty slash report"); + } + } + + // Verify the SlashReport signing topic was recognized + let sign_topic = Topic::Sign { + id: VariantSignId::SlashReport, + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + prop_assert!( + AccumulatedWeight::get(&mut txn, set, sign_topic).is_some(), + "SlashReport sign topic should be recognized" + ); + } + + /// Fuzz with varying reporter opinions (not all identical). + /// Uses 7 validators (f=2) for a richer median calculation. + #[test] + fn fuzz_slash_report_diverse_opinions_7_validators( + reports in slash_points_strategy(7, 5), + ) { + let set = default_test_validator_set(); + + // 7 validators, f = (7-1)/3 = 2 + let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = + (0 .. 7).map(|_| random_validator_key()).collect(); + let validator_data: Vec<(SeraiAddress, u16)> = + keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // required_participation = 7*2/3+1 = 5 + // We have 5 reports from 5 different validators to cross the threshold + let expected = expected_slash_report(7, &reports[..5]); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + { + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 7, &weights); + for (i, report) in reports.iter().enumerate() { + let (key, _) = keys_addrs[i]; + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { + slash_points: report.clone(), + signed: make_signed(key), + }, + ); + } + } + + // Verify the expected result + match expected { + Some(result) => { + prop_assert!(result.len() <= 2, "slash report len {} should be <= f=2", result.len()); + } + None => { + // f == 0, which can't happen with 7 validators + unreachable!(); + } + } + + // Participating path was reached → message and topic recognition + prop_assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + let sign_topic = Topic::Sign { + id: VariantSignId::SlashReport, + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + prop_assert!(AccumulatedWeight::get(&mut txn, set, sign_topic).is_some()); + } + + /// Fuzz the wrong-length path: slash_points.len() != validators.len() → fatal slash + #[test] + fn fuzz_slash_report_wrong_length( + num_validators in 4usize..10, + wrong_len in 1usize..20, + ) { + prop_assume!(wrong_len != num_validators); + + let set = default_test_validator_set(); + + let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = + (0 .. num_validators).map(|_| random_validator_key()).collect(); + let validator_data: Vec<(SeraiAddress, u16)> = + keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + let total_weight = num_validators as u16; + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let (signer_key, signer_addr) = keys_addrs[0]; + + { + let mut scan_block = + make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { + slash_points: vec![0; wrong_len], + signed: make_signed(signer_key), + }, + ); + } + + prop_assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), + "signer should be fatally slashed for wrong-length slash report" + ); + prop_assert!( + ProcessorMessages::try_recv(&mut txn, set).is_none(), + "no message should be sent for wrong-length slash report" + ); + } + } + } +} + +mod handle_block { + use super::*; + + #[test] + fn processes_application_transactions() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let batch_hash = [42; 32]; + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let block = Block { + header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + transactions: vec![TributaryTransaction::Application(Transaction::Batch { + hash: batch_hash, + })], + }; + + let mut txn = db.txn(); + let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_block(1, block); + txn.commit(); + + let topic = Topic::Sign { + id: VariantSignId::Batch(batch_hash), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + assert!(TributaryDb::recognized(&db, set, topic)); + } + + #[test] + fn empty_block_only_calls_start_of_block() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let block = Block { + header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + transactions: vec![], + }; + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_block(1, block); + txn.commit(); + + // No messages, no state changes beyond start_of_block + let mut txn = db.txn(); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + } + + #[test] + fn multiple_application_txs_in_one_block() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let batch_hash_a = [10; 32]; + let batch_hash_b = [20; 32]; + let (validator_data, validators, weights, total_weight) = + get_test_validators_and_weights_setup(); + let set_info = new_test_set_info(&validator_data); + + let block = Block { + header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + transactions: vec![ + TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_a }), + TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_b }), + ], + }; + + let mut txn = db.txn(); + let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + + scan_block.handle_block(1, block); + txn.commit(); + + for hash in [batch_hash_a, batch_hash_b] { + let topic = Topic::Sign { + id: VariantSignId::Batch(hash), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }; + assert!(TributaryDb::recognized(&db, set, topic)); + } + } +} diff --git a/tests/substrate/Cargo.toml b/tests/substrate/Cargo.toml index 5cb20c00d..bb6bc8430 100644 --- a/tests/substrate/Cargo.toml +++ b/tests/substrate/Cargo.toml @@ -23,3 +23,10 @@ tokio = { version = "1", features = ["time"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } + +rand_core = { version = "0.6", default-features = false, features = ["std"] } +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } + +[dev-dependencies] +rand = { version = "0.8", default-features = false, features = ["std"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } diff --git a/tests/substrate/src/lib.rs b/tests/substrate/src/lib.rs index 532d201be..dda2d6f1b 100644 --- a/tests/substrate/src/lib.rs +++ b/tests/substrate/src/lib.rs @@ -4,6 +4,21 @@ use serai_client_serai::Serai; use dockertest::{StartPolicy, PullPolicy, Image, TestBodySpecification, DockerOperations}; +use rand_core::{RngCore, CryptoRng}; +use serai_primitives::{address::SeraiAddress, BlockHash}; + +pub fn random_serai_address(rng: &mut R) -> SeraiAddress { + let mut key = [0; 32]; + rng.fill_bytes(&mut key); + SeraiAddress(key) +} + +pub fn random_block_hash(rng: &mut R) -> BlockHash { + let mut hash = [0; 32]; + rng.fill_bytes(&mut hash); + BlockHash(hash) +} + pub struct Handle(String); pub fn composition(name: &str, logs_path: String) -> (TestBodySpecification, Handle) { From 33a6cffa8004639e08db4007adac7a260d7232ab Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 3 Mar 2026 17:05:16 -0300 Subject: [PATCH 30/71] refactor(tributary): move utils to mod.rs --- coordinator/tributary/src/tests/mod.rs | 25 +++++++ coordinator/tributary/src/tests/scan_block.rs | 75 +++++++++---------- .../tributary/src/tests/transaction.rs | 7 +- 3 files changed, 60 insertions(+), 47 deletions(-) diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 5cb667131..782ed3a4f 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,8 +1,16 @@ +use ciphersuite::group::GroupEncoding; +use ciphersuite::WrappedGroup; +use dalek_ff_group::{Ristretto, RistrettoPoint}; +use rand::{CryptoRng, RngCore}; + use serai_primitives::{ + address::SeraiAddress, network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, }; +use zeroize::Zeroizing; + pub mod transaction; pub mod db; pub mod scan_block; @@ -12,3 +20,20 @@ pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { // this can be used just as a default value any time ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } } + +pub(crate) fn random_key( + rng: &mut R, +) -> Zeroizing<::F> { + Zeroizing::new(::F::random(&mut *rng)) +} + +pub(crate) fn get_key_point(key: Zeroizing<::F>) -> RistrettoPoint { + Ristretto::generator() * *key +} + +pub(crate) fn random_serai_address_and_key( + rng: &mut R, +) -> (RistrettoPoint, SeraiAddress) { + let key = get_key_point(random_key(rng)); + (key, SeraiAddress(key.to_bytes())) +} diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index 101fc284e..c9a29c00b 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -4,7 +4,7 @@ use rand::{Rng, RngCore}; use rand_core::OsRng; use serai_substrate_tests::{random_block_hash, random_serai_address}; -use ciphersuite::group::{GroupEncoding}; +use ciphersuite::group::GroupEncoding; use dalek_ff_group::RistrettoPoint; use serai_primitives::address::SeraiAddress; @@ -25,7 +25,7 @@ use crate::{ AccumulatedWeight, ActivelyCosigning, CosignIntents as DbCosignIntents, LatestSubstrateBlockToCosign, Topic, TributaryDb, }, - tests::default_test_validator_set, + tests::{default_test_validator_set, random_serai_address_and_key}, }; use crate::transaction::{SigningProtocolRound, Signed, Transaction}; use crate::{CosignIntents, DkgConfirmationMessages, ProcessorMessages, ScanBlock, SubstrateBlockPlans}; @@ -100,13 +100,6 @@ fn make_scan_block<'a, TDT: DbTxn>( } } -/// Generate a random Ristretto key and the corresponding SeraiAddress. -fn random_validator_key() -> (RistrettoPoint, SeraiAddress) { - let key = RistrettoPoint::random(&mut OsRng); - let address = SeraiAddress(key.to_bytes()); - (key, address) -} - /// Create a Signed with the given signer key and a dummy signature. fn make_signed(signer: RistrettoPoint) -> Signed { Signed { signer, ..Signed::default() } @@ -413,7 +406,7 @@ mod handle_application_tx { let mut txn = db.txn(); // Generate a signer that's actually in the validator set - let (signer_key, signer_addr) = random_validator_key(); + let (signer_key, signer_addr) = random_serai_address_and_key(&mut OsRng); let signer_weight = 1u16; let mut extended_validator_data = validator_data.clone(); @@ -456,9 +449,9 @@ mod handle_application_tx { let mut txn = db.txn(); // All 3 validators need real keys so they can sign - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); @@ -500,7 +493,7 @@ mod handle_application_tx { let set = default_test_validator_set(); // Use a real validator key so the signer exists in participant_indexes - let (signer_key, signer_addr) = random_validator_key(); + let (signer_key, signer_addr) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![ (signer_addr, 1u16), (random_serai_address(&mut OsRng), 1), @@ -528,9 +521,9 @@ mod handle_application_tx { fn handle_dkg_confirmation_preprocess_tx_type() { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -580,9 +573,9 @@ mod handle_application_tx { fn handle_dkg_confirmation_share_tx_type() { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (_, addr1) = random_validator_key(); - let (_, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (_, addr1) = random_serai_address_and_key(&mut OsRng); + let (_, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -617,9 +610,9 @@ mod handle_application_tx { fn dkg_confirmation_preprocess_then_share_flow() { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -845,9 +838,9 @@ mod handle_application_tx { fn handle_sign_tx_type() { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -907,9 +900,9 @@ mod handle_application_tx { fn handle_slash_report_tx_type() { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (_, addr1) = random_validator_key(); - let (_, addr2) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (_, addr1) = random_serai_address_and_key(&mut OsRng); + let (_, addr2) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -949,10 +942,10 @@ mod handle_application_tx { let mut db = MemDb::new(); let mut txn = db.txn(); - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); - let (_, addr3) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); + let (_, addr3) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -1042,10 +1035,10 @@ mod handle_application_tx { ) { let set = default_test_validator_set(); - let (key0, addr0) = random_validator_key(); - let (key1, addr1) = random_validator_key(); - let (key2, addr2) = random_validator_key(); - let (key3, addr3) = random_validator_key(); + let (key0, addr0) = random_serai_address_and_key(&mut OsRng); + let (key1, addr1) = random_serai_address_and_key(&mut OsRng); + let (key2, addr2) = random_serai_address_and_key(&mut OsRng); + let (key3, addr3) = random_serai_address_and_key(&mut OsRng); let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); @@ -1118,7 +1111,7 @@ mod handle_application_tx { // 7 validators, f = (7-1)/3 = 2 let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. 7).map(|_| random_validator_key()).collect(); + (0 .. 7).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); let validator_data: Vec<(SeraiAddress, u16)> = keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); @@ -1178,7 +1171,7 @@ mod handle_application_tx { let set = default_test_validator_set(); let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. num_validators).map(|_| random_validator_key()).collect(); + (0 .. num_validators).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); let validator_data: Vec<(SeraiAddress, u16)> = keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); @@ -1263,7 +1256,7 @@ mod handle_block { }; let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_block(1, block); txn.commit(); @@ -1292,7 +1285,7 @@ mod handle_block { }; let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_block(1, block); txn.commit(); diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 245919c1e..73f3221b9 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -2,7 +2,6 @@ use core::ops::Deref as _; use rand::{CryptoRng, RngCore, rngs::OsRng}; use schnorr::SchnorrSignature; -use zeroize::Zeroizing; use ciphersuite::{group::Group as _, *}; use dalek_ff_group::Ristretto; @@ -18,13 +17,9 @@ use tributary_sdk::{ transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, }; -use crate::db::Topic; +use crate::{db::Topic, tests::random_key}; use crate::transaction::{SigningProtocolRound, Signed, Transaction}; -fn random_key(rng: &mut R) -> Zeroizing<::F> { - Zeroizing::new(::F::random(&mut *rng)) -} - fn random_signed(rng: &mut R) -> Signed { let signed = tributary_sdk::tests::random_signed(&mut *rng); Signed { signer: signed.signer, signature: signed.signature } From 38cb838615be6566a9bb1d943114d9863146b0af Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Thu, 5 Mar 2026 16:52:01 -0300 Subject: [PATCH 31/71] feat(tributary): refactor db tests, improve handling of overflow attempts and add test cases --- coordinator/tributary/Cargo.toml | 2 + coordinator/tributary/src/db.rs | 71 +- coordinator/tributary/src/lib.rs | 2 +- coordinator/tributary/src/tests/db.rs | 1348 ++++++++++------- coordinator/tributary/src/tests/mod.rs | 22 + .../tributary/src/tests/transaction.rs | 55 +- 6 files changed, 914 insertions(+), 586 deletions(-) diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index 4c5779f23..399e3a6a2 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -49,6 +49,8 @@ rand = { version = "0.8", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } proptest = "1" tributary-sdk = { path = "../tributary-sdk", features = ["tests"] } +tokio = { version = "1", default-features = false, features = ["rt", "time", "macros", "rt-multi-thread"] } +serai-test-task = { path = "../../tests/task" } serai-substrate-tests = { path = "../../tests/substrate" } [features] diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 12b291787..6dc2b36ea 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -61,25 +61,16 @@ impl Topic { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, - Topic::DkgConfirmation { attempt, round: _ } => { - if let Some(next_attempt) = attempt.checked_add(1) { - Some(Topic::DkgConfirmation { - attempt: next_attempt, - round: SigningProtocolRound::Preprocess, - }) - } else { - None - } - } + Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { + attempt: attempt.checked_add(1).unwrap_or(0), + round: SigningProtocolRound::Preprocess, + }), Topic::SlashReport => None, - Topic::Sign { id, attempt, round: _ } => { - // checked_add here, sanity prevent infinite consecutive attempts - if let Some(next_attempt) = attempt.checked_add(1) { - Some(Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }) - } else { - None - } - } + Topic::Sign { id, attempt, round: _ } => Some(Topic::Sign { + id, + attempt: attempt.checked_add(1).unwrap_or(0), + round: SigningProtocolRound::Preprocess, + }), } } @@ -90,33 +81,25 @@ impl Topic { Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { - // checked_add here, sanity prevent infinite consecutive attempts - if let Some(next_attempt) = attempt.checked_add(1) { - Some(( - next_attempt, - Topic::DkgConfirmation { - attempt: next_attempt, - round: SigningProtocolRound::Preprocess, - }, - )) - } else { - None - } + let next_attempt = attempt.checked_add(1).unwrap_or(0); + Some(( + next_attempt, + Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }, + )) } SigningProtocolRound::Share => None, }, Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { - // checked_add here, sanity prevent infinite consecutive attempts - if let Some(next_attempt) = attempt.checked_add(1) { - Some(( - next_attempt, - Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, - )) - } else { - None - } + let next_attempt = attempt.checked_add(1).unwrap_or(0); + Some(( + next_attempt, + Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, + )) } SigningProtocolRound::Share => None, }, @@ -371,13 +354,19 @@ impl TributaryDb { Cosigned::get(txn, set, substrate_block_hash).is_some() } + /// The next topic requiring recognition which has been recognized by this Tributary. + pub fn try_recv_topic_requiring_recognition( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + RecognizedTopics::try_recv(txn, set) + } pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ExternalValidatorSet, topic: Topic) { AccumulatedWeight::set(txn, set, topic, &0); RecognizedTopics::send(txn, set, &topic); } pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { - AccumulatedWeight::get(getter, set, topic).is_some() && - RecognizedTopics::peek(getter, set).is_some() + AccumulatedWeight::get(getter, set, topic).is_some() } pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) { diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 0413fbe1e..96f8c7edf 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -104,7 +104,7 @@ impl RecognizedTopics { txn: &mut impl DbTxn, set: ExternalValidatorSet, ) -> Option { - db::RecognizedTopics::try_recv(txn, set) + TributaryDb::try_recv_topic_requiring_recognition(txn, set) } } diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 2951c203a..2e2e40334 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -7,7 +7,22 @@ use messages::sign::VariantSignId; use serai_db::{Db, DbTxn, MemDb}; use serai_substrate_tests::random_serai_address; -use crate::{db::*, transaction::SigningProtocolRound, tests::default_test_validator_set}; +use crate::{ + db::*, + tests::{default_test_validator_set, random_transaction_id, random_block_number}, + transaction::SigningProtocolRound, +}; + +fn random_data_u32() -> [u8; 32] { + let mut data = [0u8; 32]; + OsRng.fill_bytes(&mut data); + data +} +fn random_data_u64() -> [u8; 64] { + let mut data = [0u8; 64]; + OsRng.fill_bytes(&mut data); + data +} fn all_topics() -> Vec { vec![ @@ -16,18 +31,70 @@ fn all_topics() -> Vec { Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }, Topic::SlashReport, Topic::Sign { - id: VariantSignId::Transaction([0; 32]), + id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Preprocess, }, + Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share }, + ] +} + +fn all_topics_with_u32_max_attempts() -> Vec { + vec![ + Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + Topic::DkgConfirmation { attempt: std::u32::MAX, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: std::u32::MAX, round: SigningProtocolRound::Share }, + Topic::SlashReport, + Topic::Sign { + id: random_transaction_id(), + attempt: std::u32::MAX, + round: SigningProtocolRound::Preprocess, + }, Topic::Sign { - id: VariantSignId::Transaction([0; 32]), - attempt: 0, + id: random_transaction_id(), + attempt: std::u32::MAX, round: SigningProtocolRound::Share, }, ] } +type NoEachFn = fn(usize, &DataSet<[u8; 32]>); + +/// Cross threshold by accumulating from all validators, returning the final result. +fn accumulate_to_threshold( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + validators: &[SeraiAddress], + total_weight: u16, + block_number: u64, + topic: Topic, + on_each: Option, +) -> DataSet<[u8; 32]> +where + F1: FnMut(usize, &DataSet<[u8; 32]>), +{ + let mut on_each = on_each; + let mut result = DataSet::None; + for (i, v) in validators.iter().enumerate() { + result = TributaryDb::accumulate::<[u8; 32]>( + txn, + set, + validators, + total_weight, + block_number, + topic, + *v, + 1, + &[i as u8; 32], + ); + if let Some(ref mut f) = on_each { + f(i, &result); + } + } + + result +} + mod topic { use messages::sign::SignId; use super::*; @@ -37,34 +104,33 @@ mod topic { for topic in all_topics() { match topic { Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), - Topic::DkgConfirmation { attempt, round: _ } => { - if let Some(next_attempt) = attempt.checked_add(1) { - assert_eq!( - topic.next_attempt_topic(), - Some(Topic::DkgConfirmation { - attempt: next_attempt, - round: SigningProtocolRound::Preprocess, - }) - ); - } else { - assert_eq!(topic.next_attempt_topic(), None); - } - } + Topic::DkgConfirmation { attempt, .. } => assert_eq!( + topic.next_attempt_topic(), + Some(Topic::DkgConfirmation { + attempt: attempt + 1, + round: SigningProtocolRound::Preprocess, + }) + ), Topic::SlashReport => assert_eq!(topic.next_attempt_topic(), None), - Topic::Sign { id, attempt, round: _ } => { - if let Some(next_attempt) = attempt.checked_add(1) { - assert_eq!( - topic.next_attempt_topic(), - Some(Topic::Sign { - id, - attempt: next_attempt, - round: SigningProtocolRound::Preprocess - }) - ); - } else { - assert_eq!(topic.next_attempt_topic(), None); - } - } + Topic::Sign { id, attempt, .. } => assert_eq!( + topic.next_attempt_topic(), + Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess }) + ), + } + } + + for topic in all_topics_with_u32_max_attempts() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), + Topic::DkgConfirmation { .. } => assert_eq!( + topic.next_attempt_topic(), + Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }) + ), + Topic::SlashReport => assert_eq!(topic.next_attempt_topic(), None), + Topic::Sign { id, .. } => assert_eq!( + topic.next_attempt_topic(), + Some(Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess }) + ), } } } @@ -76,46 +142,60 @@ mod topic { Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { - if let Some(next_attempt) = attempt.checked_add(1) { - assert_eq!( - topic.reattempt_topic(), - Some(( - next_attempt, - Topic::DkgConfirmation { - attempt: next_attempt, - round: SigningProtocolRound::Preprocess, - }, - )) - ); - } else { - assert_eq!(topic.reattempt_topic(), None); - } + let next_attempt = attempt + 1; + assert_eq!( + topic.reattempt_topic(), + Some(( + next_attempt, + Topic::DkgConfirmation { + attempt: next_attempt, + round: SigningProtocolRound::Preprocess, + }, + )) + ); } SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, Topic::SlashReport => assert_eq!(topic.reattempt_topic(), None), Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { - if let Some(next_attempt) = attempt.checked_add(1) { - assert_eq!( - topic.reattempt_topic(), - Some(( - next_attempt, - Topic::Sign { - id, - attempt: next_attempt, - round: SigningProtocolRound::Preprocess - }, - )) - ); - } else { - assert_eq!(topic.reattempt_topic(), None); - } + let next_attempt = attempt + 1; + assert_eq!( + topic.reattempt_topic(), + Some(( + next_attempt, + Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, + )) + ); } SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, } } + + for topic in all_topics_with_u32_max_attempts() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), + Topic::DkgConfirmation { round, .. } => match round { + SigningProtocolRound::Preprocess => assert_eq!( + topic.reattempt_topic(), + Some(( + 0, + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, + )) + ), + SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), + }, + Topic::SlashReport => assert_eq!(topic.reattempt_topic(), None), + Topic::Sign { id, round, .. } => match round { + SigningProtocolRound::Preprocess => assert_eq!( + topic.reattempt_topic(), + Some((0, Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess })) + ), + SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), + }, + } + } } #[test] @@ -211,7 +291,6 @@ mod topic { mod tributary_db { use serai_substrate_tests::random_block_hash; - use super::*; #[test] @@ -219,7 +298,7 @@ mod tributary_db { let mut db = MemDb::new(); let set = default_test_validator_set(); let block_hash1 = random_block_hash(&mut OsRng); - let block_number1 = OsRng.next_u64(); + let block_number1 = random_block_number(); let topic = Topic::Sign { id: VariantSignId::Cosign(block_number1), @@ -231,8 +310,8 @@ mod tributary_db { { let mut txn = db.txn(); TributaryDb::start_cosigning(&mut txn, set, block_hash1, block_number1); - - assert!(TributaryDb::recognized(&txn, set, topic,)); + assert!(TributaryDb::try_recv_topic_requiring_recognition(&mut txn, set).is_some()); + assert!(TributaryDb::recognized(&txn, set, topic)); txn.commit(); } @@ -243,11 +322,15 @@ mod tributary_db { let retry = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = OsRng.next_u64(); + let block_number2 = random_block_number(); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); })); assert!(retry.is_err()); + + // Previous topic still recognized + assert!(TributaryDb::recognized(&txn, set, topic)); + txn.commit(); } @@ -271,11 +354,11 @@ mod tributary_db { txn.commit(); } - // Start new cosigning + // Start cosigning new block { let mut txn = db.txn(); let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = OsRng.next_u64(); + let block_number2 = random_block_number(); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash2)); @@ -283,7 +366,7 @@ mod tributary_db { TributaryDb::finish_cosigning(&mut txn, set); assert_eq!(ActivelyCosigning::get(&mut txn, set), None); - // New topic recognized + // The new topic is now recognized assert!(TributaryDb::recognized( &txn, set, @@ -293,543 +376,776 @@ mod tributary_db { round: SigningProtocolRound::Preprocess, } )); + // Previous topic also remains recognized + assert!(TributaryDb::recognized( + &txn, + set, + Topic::Sign { + id: VariantSignId::Cosign(block_number1), + attempt: 0, + round: SigningProtocolRound::Preprocess, + } + )); txn.commit(); } } -} -#[test] -fn db_start_of_block() { - let _ = env_logger::try_init(); - let set = default_test_validator_set(); + #[test] + fn start_of_block() { + let _ = env_logger::try_init(); + let set = default_test_validator_set(); - let reattemptable_topics: Vec = all_topics() - .into_iter() - .filter_map(|t| t.reattempt_topic().map(|(_, reattempt_topic)| reattempt_topic)) - .collect(); + let reattemptable_topics: Vec = all_topics() + .into_iter() + .filter_map(|t| t.reattempt_topic().map(|(_, reattempt_topic)| reattempt_topic)) + .collect(); - serai_log::log::info!( - "db_start_of_block fuzz: reattemptable_topics={reattemptable_topics:?}, \ + serai_log::log::info!( + "start_of_block fuzz: reattemptable_topics={reattemptable_topics:?}, \ all_topics count={}", - all_topics().len() - ); - - for iteration in 0 .. 100 { - for topic in all_topics() { - // Fresh DB per topic so recognized state doesn't leak between iterations - let mut db = MemDb::new(); - let block_number = OsRng.next_u64(); - let mut txn = db.txn(); - - // Randomly select which reattempt topics are queued for this block - let reattempts: Vec = - reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); - - serai_log::log::info!( - "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ + all_topics().len() + ); + + for iteration in 0 .. 100 { + for topic in all_topics() { + // Fresh DB per topic so recognized state doesn't leak between iterations + let mut db = MemDb::new(); + let block_number = random_block_number(); + let mut txn = db.txn(); + + // Randomly select which reattempt topics are queued for this block + let reattempts: Vec = + reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); + + serai_log::log::info!( + "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ reattempts={reattempts:?}" - ); + ); - if !reattempts.is_empty() { - Reattempt::set(&mut txn, set, block_number, &reattempts); - serai_log::log::info!("set {} reattempt(s) for block {block_number}", reattempts.len()); - } + if !reattempts.is_empty() { + Reattempt::set(&mut txn, set, block_number, &reattempts); + serai_log::log::info!("set {} reattempt(s) for block {block_number}", reattempts.len()); + } - TributaryDb::start_of_block(&mut txn, set, block_number); - - // Verify each queued reattempt topic was recognized and its message sent - for reattempt in &reattempts { - assert!(TributaryDb::recognized(&txn, set, *reattempt)); - if reattempt.sign_id(set).is_some() { - assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - serai_log::log::info!("verified ProcessorMessage for {reattempt:?}"); - } else if reattempt.dkg_confirmation_sign_id(set).is_some() { - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); - serai_log::log::info!("verified DkgConfirmationMessage for {reattempt:?}"); + TributaryDb::start_of_block(&mut txn, set, block_number); + + // Verify each queued reattempt topic was recognized and its message sent + for reattempt in &reattempts { + assert!(TributaryDb::recognized(&txn, set, *reattempt)); + if reattempt.sign_id(set).is_some() { + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + serai_log::log::info!("verified ProcessorMessage for {reattempt:?}"); + } else if reattempt.dkg_confirmation_sign_id(set).is_some() { + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + serai_log::log::info!("verified DkgConfirmationMessage for {reattempt:?}"); + } } - } - // When no reattempts were set, verify the current topic's reattempt was not recognized - if reattempts.is_empty() { - if let Some((_, reattempt_topic)) = topic.reattempt_topic() { - assert_eq!(TributaryDb::recognized(&txn, set, reattempt_topic), false); - serai_log::log::info!("verified {reattempt_topic:?} not recognized (no reattempts)"); + // When no reattempts were set, verify the current topic's reattempt was not recognized + if reattempts.is_empty() { + if let Some((_, reattempt_topic)) = topic.reattempt_topic() { + assert_eq!(TributaryDb::recognized(&txn, set, reattempt_topic), false); + serai_log::log::info!("verified {reattempt_topic:?} not recognized (no reattempts)"); + } } - } - // No extra messages should remain in either queue - assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + // No extra messages should remain in either queue + assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); - txn.commit(); + txn.commit(); + } } + + serai_log::log::info!("start_of_block fuzz: completed 100 iterations"); } - serai_log::log::info!("db_start_of_block fuzz: completed 100 iterations"); -} + #[test] + fn fatal_slash() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let validator = random_serai_address(&mut OsRng); -#[test] -fn db_fatal_slash() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let validator = random_serai_address(&mut OsRng); + { + let mut txn = db.txn(); + TributaryDb::fatal_slash(&mut txn, set, validator, "test reason"); + txn.commit(); + } - { - let mut txn = db.txn(); - TributaryDb::fatal_slash(&mut txn, set, validator, "test reason"); - txn.commit(); + assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); + assert_eq!(SlashPoints::get(&db, set, validator), Some(std::u32::MAX)); } - assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); - assert_eq!(SlashPoints::get(&db, set, validator), Some(u32::MAX)); -} - -/// Tests for the preceding topic existence check in `TributaryDb::accumulate`. -mod accumulate_preceding_topic { - use super::*; + mod accumulate { + use super::*; + + mod accumulate_preceding_topic { + use super::*; + + /// Set up a DkgConfirmation Share topic (which has a Preprocess preceding topic) + /// with 3 validators of weight 1 each so `required_participation = 3`. + fn setup() -> (ExternalValidatorSet, Vec, u16, u16, Topic, Topic, SeraiAddress) + { + let set = default_test_validator_set(); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let total_weight = 3; + + let share_topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }; + let preprocess_topic = + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + + let validator = validators[0]; + let validator_weight = 1; + (set, validators, validator_weight, total_weight, share_topic, preprocess_topic, validator) + } - /// Set up a DkgConfirmation Share topic (which has a Preprocess preceding topic), - /// with 3 validators of weight 1 each so `required_participation = 3`. - fn setup() -> (ExternalValidatorSet, Vec, u16, Topic, Topic, SeraiAddress) { - let set = default_test_validator_set(); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); - let total_weight = 3u16; + #[test] + fn no_preceding_data_slashes_validator() { + let ( + set, + validators, + validator_weight, + total_weight, + share_topic, + _preprocess_topic, + validator, + ) = setup(); + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Recognize the share topic so the recognition check doesn't slash + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Do NOT store any preceding Preprocess data + // Validator should be slashed with reason: + // "participated in topic without participating in prior" + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + share_topic, + validator, + validator_weight, + &random_data_u32(), + ); + txn.commit(); - let share_topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }; - let preprocess_topic = - Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; - assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + assert!(matches!(result, DataSet::None)); + assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); + } - let validator = validators[0]; - (set, validators, total_weight, share_topic, preprocess_topic, validator) - } + #[test] + fn different_type_stored_in_preceding_topic_passes_existence_check() { + let ( + set, + validators, + validator_weight, + total_weight, + share_topic, + preprocess_topic, + validator, + ) = setup(); + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Recognize the share topic so the recognition check doesn't slash + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Store preceding preprocess data ([u8; 64]) + Accumulated::<[u8; 64]>::set( + &mut txn, + set, + preprocess_topic, + validator, + &random_data_u64(), + ); - #[test] - fn no_preceding_data_slashes_validator() { - let (set, validators, total_weight, share_topic, _preprocess_topic, validator) = setup(); - let mut db = MemDb::new(); + // Accumulate a share ([u8; 32]) + // The preceding check should find the key despite the type mismatch and NOT slash. + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + share_topic, + validator, + validator_weight, + &random_data_u32(), + ); + txn.commit(); - { - let mut txn = db.txn(); + // Below threshold (1 of 3) so result is None but data is stored + assert!(matches!(result, DataSet::None)); - // Recognize the share topic so the recognition check doesn't slash - TributaryDb::recognize_topic(&mut txn, set, share_topic); + assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); + assert!(Accumulated::<[u8; 32]>::get(&db, set, share_topic, validator).is_some()); + } - // Do NOT store any preceding preprocess data - the existence check should fail - let result = TributaryDb::accumulate::<[u8; 32]>( - &mut txn, - set, - &validators, - total_weight, - 1, - share_topic, - validator, - 1, - &[10u8; 32], - ); - txn.commit(); + #[test] + fn same_type_stored_in_preceding_topic_passes_existence_check() { + let ( + set, + validators, + validator_weight, + total_weight, + _share_topic, + _preprocess_topic, + validator, + ) = setup(); + + // Sign Share has a Sign Preprocess preceding topic, both use Vec> as D + let txid = random_transaction_id(); + let share_topic = Topic::Sign { id: txid, attempt: 0, round: SigningProtocolRound::Share }; + let preprocess_topic = + Topic::Sign { id: txid, attempt: 0, round: SigningProtocolRound::Preprocess }; + assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Recognize both topics + TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); + TributaryDb::recognize_topic(&mut txn, set, share_topic); + + // Store preceding data with the same type as share will use + let preprocess_data: Vec> = vec![vec![1, 2, 3]]; + Accumulated::set(&mut txn, set, preprocess_topic, validator, &preprocess_data); + + let share_data: Vec> = vec![vec![4, 5, 6]]; + let result = TributaryDb::accumulate::>>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + share_topic, + validator, + validator_weight, + &share_data, + ); + txn.commit(); - assert!(matches!(result, DataSet::None)); + assert!(matches!(result, DataSet::None)); + assert_eq!( + Accumulated::>>::get(&db, set, share_topic, validator), + Some(share_data) + ); + assert!(!TributaryDb::is_fatally_slashed(&db, set, validator)); + } } - assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); - } + mod accumulate_next_attempt_topic { + use super::*; + + /// Set up a DkgConfirmation Preprocess topic with `attempt = std::u32::MAX` and + /// with 3 validators of weight 1 each so `required_participation = 3`. + fn setup() -> (ExternalValidatorSet, Vec, u16, u16, Topic) { + let set = default_test_validator_set(); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let total_weight = 3; + let validator_weight = 1; + + // what topic is being tested does not alter the functions being tested + // we are only testing attempt amounts here + let topic = Topic::DkgConfirmation { + attempt: std::u32::MAX, + round: SigningProtocolRound::Preprocess, + }; - #[test] - fn different_type_stored_in_preceding_topic_passes_existence_check() { - let (set, validators, total_weight, share_topic, preprocess_topic, validator) = setup(); - let mut db = MemDb::new(); + (set, validators, validator_weight, total_weight, topic) + } - { - let mut txn = db.txn(); + #[test] + fn accumulates_normally_despite_overflow() { + let (set, validators, _validator_weight, total_weight, topic) = setup(); + let mut db = MemDb::new(); + let block_number = random_block_number(); - // Recognize the share topic so the recognition check doesn't slash - TributaryDb::recognize_topic(&mut txn, set, share_topic); + { + let mut txn = db.txn(); - // Store preceding preprocess data ([u8; 64]) - Accumulated::<[u8; 64]>::set(&mut txn, set, preprocess_topic, validator, &[1u8; 64]); + // DkgConfirmation with attempt = std::u32::MAX requires recognition + TributaryDb::recognize_topic(&mut txn, set, topic); + + // Accumulate from all 3 validators to cross threshold + let result = accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + Some(|i: usize, result: &DataSet<[u8; 32]>| { + if i < 2 { + assert!(matches!(result, DataSet::None)); + } else { + // Third validator crosses the threshold + match result { + DataSet::Participating(data_set) => assert_eq!(data_set.len(), 3), + DataSet::None => panic!("expected Participating after crossing threshold"), + } + } + }), + ); + assert!(matches!(result, DataSet::Participating(_))); + + // reattempt_topic() wraps attempt std::u32::MAX to 0, so blocks_till_reattempt = 0. + // A reattempt is queued at block_number itself. + assert!(Reattempt::get(&txn, set, block_number).is_some()); + // But not at any subsequent block + for offset in 1 ..= 3 { + assert!(Reattempt::get(&txn, set, block_number.wrapping_add(offset)).is_none()); + } - // Accumulate a share ([u8; 32]) - // The preceding check should find the key despite the type mismatch and NOT slash. - let result = TributaryDb::accumulate::<[u8; 32]>( - &mut txn, - set, - &validators, - total_weight, - OsRng.next_u64(), - share_topic, - validator, - 1, - &[2u8; 32], - ); - txn.commit(); + txn.commit(); + } - // Below threshold (1 of 3) so result is None but data is stored - assert!(matches!(result, DataSet::None)); - } + for (i, v) in validators.iter().enumerate() { + assert!(!TributaryDb::is_fatally_slashed(&db, set, *v)); + assert_eq!(Accumulated::<[u8; 32]>::get(&db, set, topic, *v), Some([i as u8; 32])); + } - assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); - assert!(Accumulated::<[u8; 32]>::get(&db, set, share_topic, validator).is_some()); - } + assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(3)); + } - #[test] - fn same_type_stored_in_preceding_topic_still_works() { - let (set, validators, total_weight, _share_topic, _preprocess_topic, validator) = setup(); + /// When attempt 0 has already accumulated data, accumulating for attempt std::u32::MAX should be + /// NOP'd because `next_attempt_topic(std::u32::MAX)` wraps to attempt 0, which already exists. + #[test] + fn attempt_max_nopd_when_attempt_zero_exists() { + let (set, validators, validator_weight, total_weight, topic_max) = setup(); + let topic_0 = topic_max.next_attempt_topic().unwrap(); - // Sign Share has a Sign Preprocess preceding topic, both use Vec> as D - let share_topic = Topic::Sign { - id: VariantSignId::Transaction([42; 32]), - attempt: 0, - round: SigningProtocolRound::Share, - }; - let preprocess_topic = Topic::Sign { - id: VariantSignId::Transaction([42; 32]), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; - assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + assert_eq!( + topic_0, + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, + ); - let mut db = MemDb::new(); + let mut db = MemDb::new(); - { - let mut txn = db.txn(); + // First: accumulate for attempt 0 (below threshold, just one validator) + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic_0); + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + topic_0, + validators[0], + validator_weight, + &random_data_u32(), + ); + assert!(matches!(result, DataSet::None)); + txn.commit(); + } - // Recognize both topics - TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); - TributaryDb::recognize_topic(&mut txn, set, share_topic); + // Attempt 0 has accumulated weight + assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); - // Store preceding data with the same type as share will use - let preprocess_data: Vec> = vec![vec![1, 2, 3]]; - Accumulated::set(&mut txn, set, preprocess_topic, validator, &preprocess_data); + // Now try to accumulate for attempt std::u32::MAX + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic_max); + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + topic_max, + validators[1], + validator_weight, + &random_data_u32(), + ); + // NOP'd: next_attempt_topic(std::u32::MAX) = attempt 0, which already has weight + assert!(matches!(result, DataSet::None)); + txn.commit(); + } - let share_data: Vec> = vec![vec![4, 5, 6]]; - let result = TributaryDb::accumulate::>>( - &mut txn, - set, - &validators, - total_weight, - OsRng.next_u64(), - share_topic, - validator, - 1, - &share_data, - ); - txn.commit(); + // Attempt std::u32::MAX should have no accumulated data (it was NOP'd) + assert!(Accumulated::<[u8; 32]>::get(&db, set, topic_max, validators[1]).is_none()); + // Weight for std::u32::MAX stays at initial recognized value (0) + assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(0)); + } - assert!(matches!(result, DataSet::None)); - assert_eq!( - Accumulated::>>::get(&db, set, share_topic, validator), - Some(share_data) - ); - } + #[test] + fn attempt_max_proceeds() { + let (set, validators, validator_weight, total_weight, topic_max) = setup(); + let topic_0 = topic_max.next_attempt_topic().unwrap(); - assert!(!TributaryDb::is_fatally_slashed(&db, set, validator)); - } -} + let mut db = MemDb::new(); -mod fuzz { - use super::*; - use proptest::prelude::*; - - /// Verify all DB invariants after a single `TributaryDb::accumulate` call. - /// - /// Independently computes the expected DB state by tracing the code paths in `accumulate` - /// based on the inputs and pre-state, then asserts the actual DB matches. - #[expect(clippy::too_many_arguments)] - fn verify_accumulate_invariants( - db: &MemDb, - set: ExternalValidatorSet, - total_weight: u16, - block_number: u64, - topic: Topic, - validator: SeraiAddress, - validator_weight: u16, - data: &Vec, - pre_weight: Option, - pre_slashed: bool, - has_preceding_accumulated: bool, - has_next_topic_weight: bool, - validator_in_list: bool, - result: &DataSet>, - ) { - let required = required_participation(total_weight); - let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); - let post_weight = AccumulatedWeight::get(db, set, topic); - - // Branch 1: Slash for participating in unrecognized topic requiring recognition. - if topic.requires_recognition() && pre_weight.is_none() { - assert!(post_slashed, "should be fatally slashed for unrecognized topic"); - assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, None, "weight should remain None after recognition slash"); - assert!( - Accumulated::>::get(db, set, topic, validator).is_none(), - "no data should be stored after recognition slash" - ); - return; - } + // First: accumulate for attempt std::u32::MAX (below threshold) + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic_max); + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + topic_max, + validators[0], + validator_weight, + &random_data_u32(), + ); + assert!(matches!(result, DataSet::None)); + txn.commit(); + } - let weight_before = pre_weight.unwrap_or(0); + assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(validator_weight)); - // Branch 2: Slash for participating without completing the preceding topic. - if topic.preceding_topic().is_some() && !has_preceding_accumulated { - assert!(post_slashed, "should be fatally slashed for missing preceding participation"); - assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, pre_weight, "weight unchanged after preceding slash"); - return; - } + let data = random_data_u32(); - // Branch 3: required_participation overflows (total_weight > 32767). - let Some(required) = required else { - assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, pre_weight, "weight unchanged when required_participation overflows"); - if !pre_slashed { - assert!(!post_slashed, "should not be slashed on overflow NOP"); - } - return; - }; + // Now accumulate for attempt 0 + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic_0); + let result = TributaryDb::accumulate::<[u8; 32]>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(), + topic_0, + validators[1], + validator_weight, + &data, + ); + // Proceeds: next_attempt_topic(0) = attempt 1, which has no weight + assert!(matches!(result, DataSet::None)); + txn.commit(); + } - // Branch 4: Already accumulated past the threshold - NOP. - if weight_before >= required { - assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, pre_weight, "weight unchanged when past threshold"); - if !pre_slashed { - assert!(!post_slashed, "should not be slashed on threshold NOP"); + // Attempt 0 accumulated successfully + assert_eq!(Accumulated::<[u8; 32]>::get(&db, set, topic_0, validators[1]), Some(data)); + assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); } - return; } - // Branch 5: Old attempt - the next attempt's topic already has weight. - // Note: pre_weight may be None (topic not yet recognized) which is preserved. - let next_attempt_superseded = has_next_topic_weight && topic.next_attempt_topic().is_some(); - if next_attempt_superseded { - assert!(matches!(result, DataSet::None)); - assert_eq!(post_weight, pre_weight, "weight unchanged for superseded attempt"); - if !pre_slashed { - assert!(!post_slashed, "should not be slashed on superseded NOP"); - } - return; - } + mod fuzz { + use proptest::prelude::*; + use super::*; + + /// Verify all DB invariants after a single `TributaryDb::accumulate` call. + /// + /// Independently computes the expected DB state by tracing the code paths in `accumulate` + /// based on the inputs and pre-state, then asserts the actual DB matches. + #[expect(clippy::too_many_arguments)] + fn verify_accumulate_invariants( + db: &MemDb, + set: ExternalValidatorSet, + total_weight: u16, + block_number: u64, + topic: Topic, + validator: SeraiAddress, + validator_weight: u16, + data: &Vec, + pre_weight: Option, + pre_slashed: bool, + has_preceding_accumulated: bool, + has_next_topic_weight: bool, + validator_in_list: bool, + result: &DataSet>, + ) { + let required = required_participation(total_weight); + let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); + let post_weight = AccumulatedWeight::get(db, set, topic); + + // Branch 1: Slash for participating in unrecognized topic requiring recognition. + if topic.requires_recognition() && pre_weight.is_none() { + assert!(post_slashed, "should be fatally slashed for unrecognized topic"); + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, None, "weight should remain None after recognition slash"); + assert!( + Accumulated::>::get(db, set, topic, validator).is_none(), + "no data should be stored after recognition slash" + ); + return; + } - // Accumulation happened (Branches 6 & 7) - let new_weight = weight_before + validator_weight; - assert_eq!(post_weight, Some(new_weight), "weight should reflect accumulation"); + let weight_before = pre_weight.unwrap_or(0); - if !pre_slashed { - assert!(!post_slashed, "should not be slashed after valid accumulation"); - } + // Branch 2: Slash for participating without completing the preceding topic. + if topic.preceding_topic().is_some() && !has_preceding_accumulated { + assert!(post_slashed, "should be fatally slashed for missing preceding participation"); + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, pre_weight, "weight unchanged after preceding slash"); + return; + } - if new_weight >= required { - // Branch 7: Threshold crossed. - - // 7a: Reattempt should be queued if topic is reattemptable. - if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { - #[cfg(not(feature = "longer-reattempts"))] - const BASE_REATTEMPT_DELAY: u32 = - (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - #[cfg(feature = "longer-reattempts")] - const BASE_REATTEMPT_DELAY: u32 = - (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - - let blocks_till = u64::from(reattempt_attempt * BASE_REATTEMPT_DELAY); - let recognize_at = block_number + blocks_till; - - let queued = Reattempt::get(db, set, recognize_at); - assert!(queued.is_some(), "reattempt should be queued at block {recognize_at}"); - assert!( - queued.unwrap().contains(&reattempt_topic), - "reattempt queue should contain {reattempt_topic:?}" - ); - } + // Branch 3: required_participation overflows. + let Some(required) = required else { + assert!(matches!(result, DataSet::None)); + assert_eq!( + post_weight, pre_weight, + "weight unchanged when required_participation overflows" + ); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on overflow NOP"); + } + return; + }; + + // Branch 4: Already accumulated past the threshold - NOP. + if weight_before >= required { + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, pre_weight, "weight unchanged when past threshold"); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on threshold NOP"); + } + return; + } - // 7b: Succeeding topic should be recognized (weight set to 0). - if let Some(succeeding) = topic.succeeding_topic() { - assert_eq!( - AccumulatedWeight::get(db, set, succeeding), - Some(0), - "succeeding topic should be recognized with weight=0" - ); - } + // Branch 5: Old attempt - the next attempt's topic already has weight. + // Note: pre_weight may be None (topic not yet recognized) which is preserved. + let next_attempt_superseded = has_next_topic_weight && topic.next_attempt_topic().is_some(); + if next_attempt_superseded { + assert!(matches!(result, DataSet::None)); + assert_eq!(post_weight, pre_weight, "weight unchanged for superseded attempt"); + if !pre_slashed { + assert!(!post_slashed, "should not be slashed on superseded NOP"); + } + return; + } - // 7c: Accumulated data cleanup depends on whether a reattempt exists. - // The cleanup loop only iterates the `validators` slice, so data for a validator - // not in the list is never deleted regardless of reattempt status. - let has_reattempt = topic.reattempt_topic().is_some(); - if has_reattempt || !validator_in_list { - assert_eq!( - Accumulated::>::get(db, set, topic, validator), - Some(data.clone()), - "data should be preserved (reattempt={has_reattempt}, in_list={validator_in_list})" - ); - } else { - assert!( - Accumulated::>::get(db, set, topic, validator).is_none(), - "data should be cleaned up when no reattempt and validator in list" - ); - } + // Accumulation happened (Branches 6 & 7) + let new_weight = weight_before + validator_weight; + assert_eq!(post_weight, Some(new_weight), "weight should reflect accumulation"); + + if !pre_slashed { + assert!(!post_slashed, "should not be slashed after valid accumulation"); + } - // 7d: Result depends on whether the validator was in the collection list. - // The collection loop only gathers data from the `validators` slice. - // `participated` = data_set.contains_key(&validator), which is false when - // the validator is not in the slice. - if validator_in_list { - match result { - DataSet::Participating(data_set) => { - assert!(data_set.contains_key(&validator), "validator should be in result data set"); - assert_eq!(data_set.get(&validator).unwrap(), data, "result data should match input"); + if new_weight >= required { + // Branch 7: Threshold crossed. + + // 7a: Reattempt should be queued if topic is reattemptable. + if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { + #[cfg(not(feature = "longer-reattempts"))] + const BASE_REATTEMPT_DELAY: u32 = + (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + #[cfg(feature = "longer-reattempts")] + const BASE_REATTEMPT_DELAY: u32 = + (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + + let blocks_till = u64::from(reattempt_attempt * BASE_REATTEMPT_DELAY); + let recognize_at = block_number + blocks_till; + + let queued = Reattempt::get(db, set, recognize_at); + assert!(queued.is_some(), "reattempt should be queued at block {recognize_at}"); + assert!( + queued.unwrap().contains(&reattempt_topic), + "reattempt queue should contain {reattempt_topic:?}" + ); } - DataSet::None => { - panic!("result should be Participating when threshold crossed by listed validator"); + + // 7b: Succeeding topic should be recognized (weight set to 0). + if let Some(succeeding) = topic.succeeding_topic() { + assert_eq!( + AccumulatedWeight::get(db, set, succeeding), + Some(0), + "succeeding topic should be recognized with weight=0" + ); } - } - } else { - match topic.participating() { - Participating::Participated => { - // Validator accumulated but isn't in the list, so participated=false - assert!(matches!(result, DataSet::None), "Participated + not in list => None"); + + // 7c: Accumulated data cleanup depends on whether a reattempt exists. + // The cleanup loop only iterates the `validators` slice, so data for a validator + // not in the list is never deleted regardless of reattempt status. + let has_reattempt = topic.reattempt_topic().is_some(); + if has_reattempt || !validator_in_list { + assert_eq!( + Accumulated::>::get(db, set, topic, validator), + Some(data.clone()), + "data should be preserved (reattempt={has_reattempt}, in_list={validator_in_list})" + ); + } else { + assert!( + Accumulated::>::get(db, set, topic, validator).is_none(), + "data should be cleaned up when no reattempt and validator in list" + ); } - Participating::Everyone => { - // Everyone always returns Participating, but the validator's data won't - // be in the set (it was only collected from the validators slice) + + // 7d: Result depends on whether the validator was in the collection list. + // The collection loop only gathers data from the `validators` slice. + // `participated` = data_set.contains_key(&validator), which is false when + // the validator is not in the slice. + if validator_in_list { match result { DataSet::Participating(data_set) => { assert!( - !data_set.contains_key(&validator), - "validator not in list should not appear in data set" + data_set.contains_key(&validator), + "validator should be in result data set" + ); + assert_eq!( + data_set.get(&validator).unwrap(), + data, + "result data should match input" ); } DataSet::None => { - panic!("Everyone topics always return Participating"); + panic!("result should be Participating when threshold crossed by listed validator"); + } + } + } else { + match topic.participating() { + Participating::Participated => { + // Validator accumulated but isn't in the list, so participated=false + assert!(matches!(result, DataSet::None), "Participated + not in list => None"); + } + Participating::Everyone => { + // Everyone always returns Participating, but the validator's data won't + // be in the set (it was only collected from the validators slice) + match result { + DataSet::Participating(data_set) => { + assert!( + !data_set.contains_key(&validator), + "validator not in list should not appear in data set" + ); + } + DataSet::None => { + panic!("Everyone topics always return Participating"); + } + } } } } + } else { + // Branch 6: Below threshold - data stored, result is None. + assert!(matches!(result, DataSet::None), "result should be None when below threshold"); + assert_eq!( + Accumulated::>::get(db, set, topic, validator), + Some(data.clone()), + "accumulated data should be stored" + ); } } - } else { - // Branch 6: Below threshold - data stored, result is None. - assert!(matches!(result, DataSet::None), "result should be None when below threshold"); - assert_eq!( - Accumulated::>::get(db, set, topic, validator), - Some(data.clone()), - "accumulated data should be stored" - ); - } - } - - proptest! { - #![proptest_config(ProptestConfig::with_cases(1000))] - - #[test] - fn fuzz_accumulate( - has_initial_weight in any::(), - initial_weight in 0u16..u16::MAX, - total_weight in 1u16..u16::MAX, - - has_next_topic_weight in any::(), - next_topic_initial_weight in 0u16..u16::MAX, - - has_preceding_topic_accumulated in any::(), - - topic_variant in 0u8..5, - attempt in 0u32..100, - round in 0u8..2, - cosign_block in any::(), - batch_id in any::<[u8; 32]>(), - validator_weight in 1u16..u16::MAX, - block_number in 1u64..u64::MAX, - data in prop::collection::vec(any::(), 0..64), - - num_validators in 1u16..u16::MAX, - cur_validator in 0u16..u16::MAX, - validator_in_list in any::(), - ) { - let round = - if round == 0 { SigningProtocolRound::Preprocess } else { SigningProtocolRound::Share }; - - let topic = match topic_variant % 5 { - 0 => Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, - 1 => Topic::DkgConfirmation { attempt: attempt % 100, round }, - 2 => Topic::SlashReport, - 3 => Topic::Sign { - id: VariantSignId::Cosign(cosign_block), - attempt: attempt % 100, - round, - }, - _ => { - Topic::Sign { id: VariantSignId::Batch(batch_id), attempt: attempt % 100, round } - } - }; - let mut db = MemDb::new(); - let set = default_test_validator_set(); + proptest! { + #![proptest_config(ProptestConfig::with_cases(1000))] + + #[test] + fn fuzz_accumulate( + has_initial_weight in any::(), + initial_weight in 0u16..u16::MAX, + total_weight in 1u16..u16::MAX, + + has_next_topic_weight in any::(), + next_topic_initial_weight in 0u16..u16::MAX, + + has_preceding_topic_accumulated in any::(), + + topic_variant in 0u8..5, + attempt in 0u32..100, + round in 0u8..2, + cosign_block in any::(), + batch_id in any::<[u8; 32]>(), + validator_weight in 1u16..u16::MAX, + block_number in 1u64..u64::MAX, + data in prop::collection::vec(any::(), 0..64), + + num_validators in 1u16..u16::MAX, + cur_validator in 0u16..u16::MAX, + validator_in_list in any::(), + ) { + let round = + if round == 0 { SigningProtocolRound::Preprocess } else { SigningProtocolRound::Share }; + + let topic = match topic_variant % 5 { + 0 => Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + 1 => Topic::DkgConfirmation { attempt: attempt % 100, round }, + 2 => Topic::SlashReport, + 3 => Topic::Sign { + id: VariantSignId::Cosign(cosign_block), + attempt: attempt % 100, + round, + }, + _ => { + Topic::Sign { id: VariantSignId::Batch(batch_id), attempt: attempt % 100, round } + } + }; - let validators: Vec = - (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); + let mut db = MemDb::new(); + let set = default_test_validator_set(); - let validator_weight = validator_weight.min(total_weight).max(1); + let validators: Vec = + (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); - let mut txn = db.txn(); + let validator_weight = validator_weight.min(total_weight).max(1); - if has_initial_weight { - AccumulatedWeight::set(&mut txn, set, topic, &initial_weight); - } + let mut txn = db.txn(); - if has_next_topic_weight { - if let Some(next_attempt_topic) = topic.next_attempt_topic() { - AccumulatedWeight::set(&mut txn, set, next_attempt_topic, &next_topic_initial_weight); + if has_initial_weight { + AccumulatedWeight::set(&mut txn, set, topic, &initial_weight); } - } - // When validator_in_list is false, the accumulating validator is an outsider - // not present in the validators slice. This exercises the `participated = false` - // branch when the threshold is crossed. - let cur_validator = (cur_validator as usize) % validators.len(); - let validator = if validator_in_list { - validators[cur_validator] - } else { - random_serai_address(&mut OsRng) - }; - - if has_preceding_topic_accumulated { - if let Some(preceding_topic) = topic.preceding_topic() { - Accumulated::set(&mut txn, set, preceding_topic, validator, &data) + if has_next_topic_weight { + if let Some(next_attempt_topic) = topic.next_attempt_topic() { + AccumulatedWeight::set(&mut txn, set, next_attempt_topic, &next_topic_initial_weight); + } } - } - let pre_weight = AccumulatedWeight::get(&txn, set, topic); - let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); - - let result = TributaryDb::accumulate::>( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - ); + // When validator_in_list is false, the accumulating validator is an outsider + // not present in the validators slice. This exercises the `participated = false` + // branch when the threshold is crossed. + let cur_validator = (cur_validator as usize) % validators.len(); + let validator = if validator_in_list { + validators[cur_validator] + } else { + random_serai_address(&mut OsRng) + }; + + if has_preceding_topic_accumulated { + if let Some(preceding_topic) = topic.preceding_topic() { + Accumulated::set(&mut txn, set, preceding_topic, validator, &data) + } + } - txn.commit(); + let pre_weight = AccumulatedWeight::get(&txn, set, topic); + let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); + + let result = TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + ); - verify_accumulate_invariants( - &db, - set, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - pre_weight, - pre_slashed, - has_preceding_topic_accumulated, - has_next_topic_weight, - validator_in_list, - &result, - ); + txn.commit(); + + verify_accumulate_invariants( + &db, + set, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + pre_weight, + pre_slashed, + has_preceding_topic_accumulated, + has_next_topic_weight, + validator_in_list, + &result, + ); + } } + } } } diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 782ed3a4f..4d4d0f019 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,7 +1,10 @@ use ciphersuite::group::GroupEncoding; use ciphersuite::WrappedGroup; use dalek_ff_group::{Ristretto, RistrettoPoint}; +use messages::sign::VariantSignId; + use rand::{CryptoRng, RngCore}; +use rand_core::OsRng; use serai_primitives::{ address::SeraiAddress, @@ -9,12 +12,21 @@ use serai_primitives::{ validator_sets::{ExternalValidatorSet, Session}, }; +use tributary_sdk::P2p; use zeroize::Zeroizing; pub mod transaction; pub mod db; pub mod scan_block; +#[derive(Clone)] +struct MockP2p; +impl P2p for MockP2p { + fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + core::future::Future { + async {} + } +} + pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { // The external validator set does not alter or affect the behavior of the functions being tested // this can be used just as a default value any time @@ -37,3 +49,13 @@ pub(crate) fn random_serai_address_and_key( let key = get_key_point(random_key(rng)); (key, SeraiAddress(key.to_bytes())) } + +pub(crate) fn random_transaction_id() -> VariantSignId { + let mut txid = [0u8; 32]; + OsRng.fill_bytes(&mut txid); + VariantSignId::Transaction(txid) +} + +pub(crate) fn random_block_number() -> u64 { + OsRng.next_u64() +} diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 73f3221b9..0b38b7eb2 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -6,7 +6,7 @@ use schnorr::SchnorrSignature; use ciphersuite::{group::Group as _, *}; use dalek_ff_group::Ristretto; -use serai_primitives::{validator_sets::KeyShares, address::SeraiAddress}; +use serai_primitives::validator_sets::KeyShares; use serai_substrate_tests::{random_serai_address, random_block_hash}; use messages::sign::VariantSignId; @@ -149,9 +149,7 @@ mod signed { } } - let mut writer = FailingWriter; - - let result = random_signed(&mut OsRng).serialize(&mut writer); + let result = random_signed(&mut OsRng).serialize(&mut FailingWriter); assert!(result.is_err()); assert_eq!(result.unwrap_err().kind(), io::ErrorKind::Other); } @@ -165,13 +163,12 @@ mod signed { } } - let mut failing_reader = FailingReader; - let result = Signed::deserialize_reader(&mut failing_reader); + let result = Signed::deserialize_reader(&mut FailingReader); assert!(result.is_err()); assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); } - // Errors with incomplete data + // Errors with incomplete data (signer read_G fails) { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let truncated = &serialized[.. 5]; @@ -179,6 +176,15 @@ mod signed { let result = Signed::deserialize_reader(&mut cursor); assert!(result.is_err()); } + + // Errors when signer is valid but signature data is missing (SchnorrSignature::read fails) + { + let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); + let signer_only = &serialized[.. 32]; + let mut cursor = std::io::Cursor::new(signer_only); + let result = Signed::deserialize_reader(&mut cursor); + assert!(result.is_err()); + } } } @@ -225,22 +231,22 @@ mod kind { let nonce = signed.nonce; match tx { - Transaction::RemoveParticipant { participant: _, signed: _ } => { + Transaction::RemoveParticipant { .. } => { assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) } - Transaction::DkgParticipation { participation: _, signed: _ } => { + Transaction::DkgParticipation { .. } => { assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) } - Transaction::DkgConfirmationPreprocess { attempt: _, preprocess: _, signed: _ } => { + Transaction::DkgConfirmationPreprocess { .. } => { assert_eq!(nonce, SigningProtocolRound::Share.nonce()) } - Transaction::DkgConfirmationShare { attempt: _, share: _, signed: _ } => { + Transaction::DkgConfirmationShare { .. } => { assert_eq!(nonce, SigningProtocolRound::Share.nonce()) } - Transaction::Sign { id: _, attempt: _, round, data: _, signed: _ } => { + Transaction::Sign { round, .. } => { assert_eq!(nonce, round.nonce()) } - Transaction::SlashReport { slash_points: _, signed: _ } => { + Transaction::SlashReport { .. } => { assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) } _ => panic!("Expected Signed kind for {tx:?}"), @@ -369,52 +375,45 @@ fn tx_verify() { #[test] fn topic_returns_correct_mapping() { - let participant = SeraiAddress([1; 32]); + let participant = random_serai_address(&mut OsRng); - // RemoveParticipant → Some(RemoveParticipant) let tx = Transaction::RemoveParticipant { participant, signed: Signed::default() }; assert_eq!(tx.topic(), Some(Topic::RemoveParticipant { participant })); - // DkgParticipation → None let tx = Transaction::DkgParticipation { participation: vec![], signed: Signed::default() }; assert_eq!(tx.topic(), None); - // DkgConfirmationPreprocess → DkgConfirmation with Preprocess round let tx = Transaction::DkgConfirmationPreprocess { - attempt: 5, + attempt: 0, preprocess: [0; 64], signed: Signed::default(), }; assert_eq!( tx.topic(), - Some(Topic::DkgConfirmation { attempt: 5, round: SigningProtocolRound::Preprocess }) + Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }) ); - // DkgConfirmationShare → DkgConfirmation with Share round let tx = - Transaction::DkgConfirmationShare { attempt: 3, share: [0; 32], signed: Signed::default() }; + Transaction::DkgConfirmationShare { attempt: 0, share: [0; 32], signed: Signed::default() }; assert_eq!( tx.topic(), - Some(Topic::DkgConfirmation { attempt: 3, round: SigningProtocolRound::Share }) + Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }) ); - // Provided transactions → None for tx in all_provided_transactions() { assert_eq!(tx.topic(), None, "Provided tx should have no topic: {tx:?}"); } - // Sign → Topic::Sign preserving all fields let id = VariantSignId::Batch([9; 32]); let tx = Transaction::Sign { id, - attempt: 2, + attempt: 0, round: SigningProtocolRound::Share, data: vec![], signed: Signed::default(), }; - assert_eq!(tx.topic(), Some(Topic::Sign { id, attempt: 2, round: SigningProtocolRound::Share })); + assert_eq!(tx.topic(), Some(Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Share })); - // SlashReport → Topic::SlashReport let tx = Transaction::SlashReport { slash_points: vec![], signed: Signed::default() }; assert_eq!(tx.topic(), Some(Topic::SlashReport)); } @@ -445,7 +444,7 @@ mod sign { // Wrong genesis fails verification { let mut tx = Transaction::RemoveParticipant { - participant: SeraiAddress([1; 32]), + participant: random_serai_address(&mut OsRng), signed: Signed::default(), }; tx.sign(&mut OsRng, new_genesis(), &key); From a94f298bba8a6db8f2e6ac1e57b5ba34e92bef6a Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 9 Mar 2026 12:20:27 -0300 Subject: [PATCH 32/71] misc --- coordinator/cosign/src/tests/intend.rs | 49 ++++++++++---------------- 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 07bc3286b..a6a709dd9 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -332,8 +332,6 @@ mod errors { /// Random event, state, and block generator. struct EventFuzzer { - /// Monotonic counter hashed with blake2 for deterministic pseudo-random bytes. - counter: u64, /// Seed bytes. seed: [u8; 32], /// Available validator addresses. @@ -367,7 +365,6 @@ impl EventFuzzer { let networks: Vec = NetworkId::all().collect(); Self { - counter: 0, seed, validators, networks, @@ -377,26 +374,17 @@ impl EventFuzzer { } } - /// Generate a pseudo-random `u64` by hashing counter + seed with blake2. - fn next_u64(&mut self) -> u64 { - use blake2::{Blake2b256, Digest as _}; - let hash = - Blake2b256::new().chain_update(self.seed).chain_update(self.counter.to_le_bytes()).finalize(); - self.counter += 1; - u64::from_le_bytes(hash[0 .. 8].try_into().unwrap()) - } - /// Pick a random element from a slice. fn pick<'a, T>(&mut self, slice: &'a [T]) -> &'a T { - let idx = self.next_u64() % u64::try_from(slice.len()).unwrap(); - &slice[usize::try_from(idx).unwrap()] + let i = OsRng.next_u64() % u64::try_from(slice.len()).unwrap(); + &slice[usize::try_from(i).unwrap()] } /// Generate a random allocation event. fn random_allocation(&mut self) -> Event { let validator = *self.pick(&self.validators.clone()); let network = *self.pick(&self.networks.clone()); - let amount = (self.next_u64() % 10000) + 1; // 1..=10000 + let amount = (OsRng.next_u64() % 10000) + 1; // 1..=10000 if let Ok(ext) = ExternalNetworkId::try_from(network) { *self.stakes.entry((ext, validator)).or_default() += amount; } @@ -406,9 +394,9 @@ impl EventFuzzer { /// Generate a random deallocation event. Returns `None` if no validator has stake. fn random_deallocation(&mut self) -> Option { // ~25% chance of generating a Serai deallocation (exercises the `continue` branch) - if self.next_u64() % 4 == 0 { + if OsRng.next_u64() % 4 == 0 { let validator = *self.pick(&self.validators.clone()); - let amount = (self.next_u64() % 100) + 1; + let amount = (OsRng.next_u64() % 100) + 1; return Some(deallocation_event(validator, NetworkId::Serai, amount)); } @@ -417,9 +405,9 @@ impl EventFuzzer { if candidates.is_empty() { return None; } - let idx = self.next_u64() % u64::try_from(candidates.len()).unwrap(); - let ((network, validator), current_stake) = candidates[usize::try_from(idx).unwrap()]; - let amount = (self.next_u64() % current_stake) + 1; // 1..=current_stake + let i = OsRng.next_u64() % u64::try_from(candidates.len()).unwrap(); + let ((network, validator), current_stake) = candidates[usize::try_from(i).unwrap()]; + let amount = (OsRng.next_u64() % current_stake) + 1; // 1..=current_stake *self.stakes.entry((network, validator)).or_default() -= amount; Some(deallocation_event(validator, NetworkId::External(network), amount)) } @@ -441,14 +429,15 @@ impl EventFuzzer { // Pick 1..=min(3, validators.len()) random validators for this set let max_count = self.validators.len().min(3); - let count = usize::try_from((self.next_u64() % u64::try_from(max_count).unwrap()) + 1).unwrap(); + let count = + usize::try_from((OsRng.next_u64() % u64::try_from(max_count).unwrap()) + 1).unwrap(); // Shuffle-pick by swapping from a clone let mut pool = self.validators.clone(); let mut chosen = Vec::with_capacity(count); for _ in 0 .. count { - let idx = usize::try_from(self.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap(); - chosen.push(pool.swap_remove(idx)); + let i = usize::try_from(OsRng.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap(); + chosen.push(pool.swap_remove(i)); } self.pending_keys.insert(set, chosen.clone()); @@ -469,8 +458,8 @@ impl EventFuzzer { } let keys: Vec = self.pending_keys.keys().copied().collect(); - let idx = usize::try_from(self.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); - let set = keys[idx]; + let i = usize::try_from(OsRng.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); + let set = keys[i]; // Remove from pending — the task will Validators::take it self.pending_keys.remove(&set); @@ -478,7 +467,7 @@ impl EventFuzzer { *self.next_session.entry(set.network).or_insert(0) += 1; let mut public = Public([0u8; 32]); - public.0[0 .. 8].copy_from_slice(&self.next_u64().to_le_bytes()); + public.0[0 .. 8].copy_from_slice(&OsRng.next_u64().to_le_bytes()); let external_key = ExternalKey(vec![1u8].try_into().unwrap()); let key_pair = KeyPair(public, external_key); @@ -488,13 +477,13 @@ impl EventFuzzer { /// Generate a random BurnWithInstruction event. fn random_burn(&mut self) -> Event { let mut burn_address = SeraiAddress([0u8; 32]); - burn_address.0[0 .. 8].copy_from_slice(&self.next_u64().to_le_bytes()); + burn_address.0[0 .. 8].copy_from_slice(&OsRng.next_u64().to_le_bytes()); burn_with_instruction_event(burn_address) } /// Generate random events for a single block. fn generate_block_events(&mut self) -> Vec> { - let num_events = self.next_u64() % 8; // 0..=7 events per block + let num_events = OsRng.next_u64() % 8; // 0..=7 events per block if num_events == 0 { return vec![]; } @@ -506,7 +495,7 @@ impl EventFuzzer { let mut burn_count = 0u64; for _ in 0 .. num_events { - match self.next_u64() % 100 { + match OsRng.next_u64() % 100 { 0 ..= 35 => alloc_count += 1, 36 ..= 55 => dealloc_count += 1, 56 ..= 70 => set_decided_count += 1, @@ -549,7 +538,7 @@ impl EventFuzzer { // Shuffle the events to test order-independence for i in (1 .. events.len()).rev() { - let j = usize::try_from(self.next_u64() % u64::try_from(i + 1).unwrap()).unwrap(); + let j = usize::try_from(OsRng.next_u64() % u64::try_from(i + 1).unwrap()).unwrap(); events.swap(i, j); } From 1493c168f330bac28f6d08b22019033bcdb39e2f Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 13 Mar 2026 16:26:21 -0300 Subject: [PATCH 33/71] feat(coordinator/cosign): adding full-stack tests --- common/task/src/lib.rs | 5 +- coordinator/cosign/Cargo.toml | 3 + coordinator/cosign/src/delay.rs | 32 +++- coordinator/cosign/src/evaluator.rs | 54 +++++-- coordinator/cosign/src/intend.rs | 20 ++- coordinator/cosign/src/lib.rs | 16 +- coordinator/cosign/src/tests/cosigning.rs | 176 ++++++++++----------- coordinator/cosign/src/tests/delay.rs | 55 ++++++- coordinator/cosign/src/tests/evaluator.rs | 19 ++- coordinator/cosign/src/tests/full_stack.rs | 125 +++++++++++++++ coordinator/cosign/src/tests/intend.rs | 56 ++++--- coordinator/cosign/src/tests/mod.rs | 3 + coordinator/cosign/types/Cargo.toml | 6 +- coordinator/cosign/types/src/tests/mod.rs | 59 ++----- coordinator/src/main.rs | 6 +- coordinator/src/tributary.rs | 2 +- coordinator/substrate/src/canonical.rs | 2 +- coordinator/substrate/src/ephemeral.rs | 2 +- substrate/primitives/Cargo.toml | 1 + substrate/primitives/src/lib.rs | 4 + substrate/primitives/src/test_helpers.rs | 40 +++++ 21 files changed, 484 insertions(+), 202 deletions(-) create mode 100644 coordinator/cosign/src/tests/full_stack.rs create mode 100644 substrate/primitives/src/test_helpers.rs diff --git a/common/task/src/lib.rs b/common/task/src/lib.rs index ca002a8a4..9e44acac9 100644 --- a/common/task/src/lib.rs +++ b/common/task/src/lib.rs @@ -106,8 +106,9 @@ pub trait ContinuallyRan: Sized + Send { let mut current_sleep_before_next_task = default_sleep_before_next_task; let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; - // Set a limit of sleeping for two minutes - *current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS); + // Set a limit of sleeping **at most** two minutes + // use min to get the smallest value: either new_sleep, or 2 minutes. Never greater + *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS); }; loop { diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 5d8a48fac..55fca6c96 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -40,4 +40,7 @@ serai-shim-rpc = { path = "../../tests/shim-rpc" } serai-test-task = { path = "../../tests/task" } hex = { version = "0.4", default-features = false } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } +rand = { version = "0.8", default-features = false } rand_core = { version = "0.6", default-features = false } +serai-primitives = { path = "../../substrate/primitives", features = ["test-helpers"] } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index bed89ec5f..9c6ca655b 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -4,7 +4,7 @@ use std::time::{Duration, SystemTime}; use serai_db::*; use serai_task::{DoesNotError, ContinuallyRan}; -use crate::evaluator::CosignedBlocks; +use crate::evaluator::{CosignedBlocks, LatestEvaluatedBlock}; #[cfg(not(any(test)))] /// How often callers should broadcast the cosigns flagged for rebroadcasting. @@ -27,8 +27,8 @@ pub(crate) fn now_timestamp() -> Duration { create_db!( SubstrateCosignDelay { - // The latest cosigned block number. - LatestCosignedBlockNumber: () -> u64, + // The latest block number acknowledged by the delay task. + LatestAcknowledgedBlock: () -> u64, } ); @@ -52,13 +52,13 @@ impl ContinuallyRan for CosignDelayTask { break; }; - let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&mut txn).unwrap_or(0); + let latest_acknowledged_block = LatestAcknowledgedBlock::get(&mut txn).unwrap_or(0); serai_log::debug!( - "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_cosigned_block_number={latest_cosigned_block_number}", + "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_acknowledged_block={latest_acknowledged_block}", ); - if block_number <= latest_cosigned_block_number { + if block_number <= latest_acknowledged_block { // If we've already acknowledged a later block, consume and skip (don't sleep). txn.commit(); continue; @@ -74,19 +74,35 @@ impl ContinuallyRan for CosignDelayTask { if time_valid_timestamp > now_timestamp { // Sleep until then let time_left = time_valid_timestamp - now_timestamp; + serai_log::debug!("beginning sleep: {time_left}s"); tokio::time::sleep(Duration::from_secs(time_left)).await; } let mut txn = self.db.txn(); // Consume block to continue CosignedBlocks::try_recv(&mut txn); - // Set the cosigned block - LatestCosignedBlockNumber::set(&mut txn, &block_number); + LatestAcknowledgedBlock::set(&mut txn, &block_number); txn.commit(); + serai_log::debug!("LatestAcknowledgedBlock={block_number}"); + made_progress = true; } + // Catch up to HasEvents::No blocks that don't go through CosignedBlocks + // they only advance LatestEvaluatedBlock. These blocks need no sleep delay. + // since no cosign means no need for equivocation prevention + if let Some(evaluated) = LatestEvaluatedBlock::get(&self.db) { + let acknowledged = LatestAcknowledgedBlock::get(&self.db).unwrap_or(0); + if evaluated > acknowledged { + let mut txn = self.db.txn(); + LatestAcknowledgedBlock::set(&mut txn, &evaluated); + txn.commit(); + serai_log::debug!("LatestAcknowledgedBlock={evaluated} (caught up to evaluator)"); + made_progress = true; + } + } + Ok(made_progress) } } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 659b69287..c01e8342f 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -21,6 +21,8 @@ create_db!( SubstrateCosignEvaluator { // The global session currently being evaluated. CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession), + // The latest block number the evaluator has processed. + LatestEvaluatedBlock: () -> u64, } ); @@ -31,6 +33,12 @@ db_channel!( } ); +/// Commit a block as evaluated without sending it for cosign delay. +fn commit_evaluated_block(mut txn: impl DbTxn, block_number: u64) { + LatestEvaluatedBlock::set(&mut txn, &block_number); + txn.commit(); +} + /// This is a strict function which won't panic, even with a malicious Serai node, so long as: /// - It's called incrementally (with an increment of 1) /// - It's only called for block numbers we've completed indexing on within the intend task @@ -168,6 +176,11 @@ pub(crate) struct CosignEvaluatorTask { } impl ContinuallyRan for CosignEvaluatorTask { + #[cfg(test)] + const DELAY_BETWEEN_ITERATIONS: u64 = 1; + #[cfg(test)] + const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5; + type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { @@ -176,21 +189,33 @@ impl ContinuallyRan for CosignEvaluatorT let mut made_progress = false; loop { - // This task requires the global sessions channel to be populated - // as the block declaring the session is indexed - if CurrentlyEvaluatedGlobalSession::get(&self.db).is_none() && - GlobalSessionsChannel::peek(&self.db).is_none() - { - // no session has ever been declared - return Ok(false); - } - let mut txn = self.db.txn(); let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) else { break; }; + // If no session is being evaluated yet, check if this block can be processed + if currently_evaluated_global_session(&txn).is_none() { + match GlobalSessionsChannel::peek(&txn) { + // No global session declared yet: this block predates all sessions, skip it + // this means only HasEvents:No blocks have been consumed so far + None => { + commit_evaluated_block(txn, block_number); + made_progress = true; + continue; + } + // Session queued but starts after this block, skip it + Some(next) if next.1.start_block_number > block_number => { + commit_evaluated_block(txn, block_number); + made_progress = true; + continue; + } + // Session covers this block: proceed normally + _ => {} + } + } + serai_log::log::debug!( "beginning evaluator: block_number={block_number}, has_events={:#?}", has_events @@ -274,17 +299,24 @@ impl ContinuallyRan for CosignEvaluatorT } // If this block has no events necessitating cosigning, we can immediately consider the // block cosigned (making this block a NOP) - HasEvents::No => {} + HasEvents::No => { + commit_evaluated_block(txn, block_number); + made_progress = true; + continue; + } } // Since we checked we had the necessary cosigns, send it for delay before acknowledgement CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); - txn.commit(); + commit_evaluated_block(txn, block_number); // Roughly ~1 hour, no need for repetitive logging + #[cfg(not(test))] if (block_number % 500) == 0 { serai_log::debug!("marking block #{block_number} as cosigned"); } + #[cfg(test)] + serai_log::info!("marking block #{block_number} as cosigned"); made_progress = true; } diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 9b80e4fc0..c6eb378a1 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -72,6 +72,11 @@ pub(crate) struct CosignIntendTask { } impl ContinuallyRan for CosignIntendTask { + #[cfg(test)] + const DELAY_BETWEEN_ITERATIONS: u64 = 1; + #[cfg(test)] + const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5; + type Error = String; fn run_iteration(&mut self) -> impl Send + Future> { @@ -143,7 +148,7 @@ impl ContinuallyRan for CosignIntendTask { ); BuildsUpon::set(&mut txn, &builds_upon); - serai_log::debug!("iterating over block_number={block_number}, hash={serai_block_hash:?}"); + serai_log::debug!("iterating over block_number={block_number}"); let mut has_events = HasEvents::No; let vset_events = serai_block_events.validator_sets(); @@ -287,6 +292,11 @@ impl ContinuallyRan for CosignIntendTask { stakes, total_stake, }; + + serai_log::debug!( + "Notable block block_number={block_number}: new session created {next_global_session_info:?}" + ); + GlobalSessions::set(&mut txn, new_global_session, &next_global_session_info); if let Some(ending_global_session) = global_session_for_this_block { GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number); @@ -316,7 +326,9 @@ impl ContinuallyRan for CosignIntendTask { // Tell each set of their expectation to cosign this block for set in ending_global_session_info.sets { - serai_log::debug!("set will cosign block: set={set:?}, block_number={block_number}"); + serai_log::info!( + "set will cosign {has_events:?} block: set={set:?}, block_number={block_number}" + ); IntendedCosigns::send( &mut txn, @@ -333,7 +345,9 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - serai_log::debug!("finished iterating: has_events={has_events:?}"); + serai_log::debug!( + "finished iterating block_number={block_number}: has_events={has_events:?}" + ); // Populate a singular feed with every block's status for the evaluator to work off of BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index ebafe42c1..5ed0f9ad9 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -32,7 +32,7 @@ mod evaluator; /// The task to delay acknowledgement of the cosigns. mod delay; pub use delay::BROADCAST_FREQUENCY; -use delay::LatestCosignedBlockNumber; +use delay::LatestAcknowledgedBlock; #[cfg(test)] /// Test helpers and fixtures. @@ -228,13 +228,13 @@ impl Cosigning { Self { db, _task_handles: vec![intend_task_handle, evaluator_task_handle, delay_task_handle] } } - /// The latest cosigned block number. - pub fn latest_cosigned_block_number(getter: &impl Get) -> Result { + /// The latest acknowledged block number. + pub fn latest_acknowledged_block(getter: &impl Get) -> Result { if FaultedSession::get(getter).is_some() { Err(Faulted)?; } - Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) + Ok(LatestAcknowledgedBlock::get(getter).unwrap_or(0)) } /// Fetch a cosigned Substrate block's hash by its block number. @@ -242,7 +242,7 @@ impl Cosigning { getter: &impl Get, block_number: u64, ) -> Result, Faulted> { - if block_number == 0 || block_number > Self::latest_cosigned_block_number(getter)? { + if block_number == 0 || block_number > Self::latest_acknowledged_block(getter)? { return Ok(None); } @@ -357,10 +357,10 @@ impl Cosigning { if !faulty { // If this is for a future global session, we don't acknowledge this cosign at this time - let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); + let latest_evaluated_block = evaluator::LatestEvaluatedBlock::get(&txn).unwrap_or(0); // This global session starts the block *after* its declaration, so we want to check if the - // block declaring it was cosigned - if (global_session.start_block_number - 1) > latest_cosigned_block_number { + // block declaring it was evaluated + if (global_session.start_block_number - 1) > latest_evaluated_block { drop(txn); return Err(IntakeCosignError::FutureGlobalSession); } diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index 1cd374524..9aad1cdec 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -4,8 +4,11 @@ use borsh::{BorshDeserialize, BorshSerialize}; use blake2::{Blake2s256, Digest}; +use rand_core::OsRng; use serai_db::{Db as _, DbTxn, MemDb}; +use serai_primitives::test_helpers::random_keypair; +use serai_cosign_types::tests::sign_cosign; use serai_task::Task; use serai_client_serai::abi::primitives::{ @@ -18,42 +21,10 @@ use serai_client_serai::abi::primitives::{ use crate::{ Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, - SubstrateBlockHash, delay::LatestCosignedBlockNumber, evaluator::CurrentlyEvaluatedGlobalSession, + SubstrateBlockHash, delay::LatestAcknowledgedBlock, evaluator::CurrentlyEvaluatedGlobalSession, intend::IntendedCosigns, tests::TestRequest, tests::setup_shim_serai, }; -use serai_cosign_types::tests::{ - fixture_public_key, public_key_from_seed, sign_cosign_with_fixture, sign_cosign_with_seed, -}; - -const FIXTURE_SEED: [u8; 32] = [0xff; 32]; - -struct Sr25519Fixture { - seed: [u8; 32], -} - -impl Sr25519Fixture { - fn public_bytes(&self) -> [u8; 32] { - if self.seed == FIXTURE_SEED { - fixture_public_key() - } else { - public_key_from_seed(self.seed) - } - } -} - -fn sr25519_fixture() -> Sr25519Fixture { - Sr25519Fixture { seed: FIXTURE_SEED } -} - -fn sign_cosign(cosign: Cosign, fixture: &Sr25519Fixture) -> SignedCosign { - if fixture.seed == FIXTURE_SEED { - sign_cosign_with_fixture(cosign) - } else { - sign_cosign_with_seed(cosign, fixture.seed) - } -} - #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] struct TestGlobalSession { start_block_number: u64, @@ -80,19 +51,21 @@ impl TestGlobalSession { } } -fn session_fixture() -> TestGlobalSession { +fn random_session() -> (TestGlobalSession, schnorrkel::Keypair) { let network = ExternalNetworkId::Bitcoin; let set = ExternalValidatorSet { network, session: Session(0) }; + let (keypair, public) = random_keypair(&mut OsRng); + let mut keys = HashMap::new(); let mut stakes = HashMap::new(); - let fixture = sr25519_fixture(); - let pubkey = Public(fixture.public_bytes()); - keys.insert(network, pubkey); + keys.insert(network, public); stakes.insert(network, 100); - TestGlobalSession { start_block_number: 1, sets: vec![set], keys, stakes, total_stake: 100 } + let session = + TestGlobalSession { start_block_number: 1, sets: vec![set], keys, stakes, total_stake: 100 }; + (session, keypair) } fn seed_minimal_state(db: &mut MemDb, session: &TestGlobalSession) { @@ -106,7 +79,7 @@ fn seed_minimal_state(db: &mut MemDb, session: &TestGlobalSession) { CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); // Required for `intake_cosign` to not classify a session as "future". - LatestCosignedBlockNumber::set(&mut txn, &0u64); + LatestAcknowledgedBlock::set(&mut txn, &0u64); txn.commit(); } @@ -198,7 +171,7 @@ mod spawn { assert!(cosigning.cosigns_to_rebroadcast().is_empty()); - let latest = Cosigning::::latest_cosigned_block_number(&db); + let latest = Cosigning::::latest_acknowledged_block(&db); assert!(latest.is_ok()); assert_eq!(latest.unwrap(), 0); } @@ -213,40 +186,74 @@ mod spawn { tokio::time::sleep(Duration::from_millis(10)).await; - let latest = Cosigning::::latest_cosigned_block_number(&db); + let latest = Cosigning::::latest_acknowledged_block(&db); assert!(latest.is_ok()); } + + #[tokio::test] + async fn spawn_end_to_end() { + let db = MemDb::new(); + let (shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); + + // Create block 0 so the intend task has something to scan + shim_serai.make_block(0, vec![]).await; + + // Spawn cosigning tasks in the background + let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + // Keep adding new blocks while the background tasks process them + let total_blocks = 10u64; + for _ in 1 ..= total_blocks { + shim_serai.add_block_with_events(vec![]).await; + tokio::time::sleep(Duration::from_millis(50)).await; + } + + // Wait for the full pipeline to process in a polling loop: + // intend (indexes blocks) -> evaluator (passes HasEvents::No through) -> delay (waits + // ACKNOWLEDGEMENT_DELAY) + loop { + let latest = Cosigning::::latest_acknowledged_block(&db); + if latest.map(|n| n >= total_blocks).unwrap_or(false) { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + + let latest = Cosigning::::latest_acknowledged_block(&db).unwrap(); + assert!(latest >= total_blocks); + } } -mod latest_cosigned_block_number { +mod latest_acknowledged_block { use super::*; #[test] - fn latest_cosigned_block_number_defaults_to_zero() { + fn latest_acknowledged_block_defaults_to_zero() { let db = MemDb::new(); - assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 0); + assert_eq!(Cosigning::::latest_acknowledged_block(&db).unwrap(), 0); } #[test] - fn latest_cosigned_block_number_errors_when_faulted() { + fn latest_acknowledged_block_errors_when_faulted() { let mut db = MemDb::new(); { let mut txn = db.txn(); FaultedSession::set(&mut txn, &[1u8; 32]); txn.commit(); } - assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); + assert!(matches!(Cosigning::::latest_acknowledged_block(&db), Err(Faulted))); } #[test] - fn latest_cosigned_block_number_returns_stored_value() { + fn latest_acknowledged_block_returns_stored_value() { let mut db = MemDb::new(); { let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &42u64); + LatestAcknowledgedBlock::set(&mut txn, &42u64); txn.commit(); } - assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), 42); + assert_eq!(Cosigning::::latest_acknowledged_block(&db).unwrap(), 42); } } @@ -258,7 +265,7 @@ mod cosigned_block { let mut db = MemDb::new(); { let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &5u64); + LatestAcknowledgedBlock::set(&mut txn, &5u64); txn.commit(); } assert_eq!(Cosigning::::cosigned_block(&db, 6).unwrap(), None); @@ -270,7 +277,7 @@ mod cosigned_block { let block_hash = BlockHash([9u8; 32]); { let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &5u64); + LatestAcknowledgedBlock::set(&mut txn, &5u64); SubstrateBlockHash::set(&mut txn, 3, &block_hash); txn.commit(); } @@ -298,7 +305,7 @@ mod cosigned_block { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, 5, &block_hash_5); SubstrateBlockHash::set(&mut txn, 10, &block_hash_10); - LatestCosignedBlockNumber::set(&mut txn, &10u64); + LatestAcknowledgedBlock::set(&mut txn, &10u64); txn.commit(); } @@ -328,9 +335,8 @@ mod notable_cosigns { #[test] fn notable_cosigns_returns_cosigns_for_session() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -363,9 +369,8 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -418,9 +423,8 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -448,9 +452,8 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -502,7 +505,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_not_yet_indexed_block() { let db = MemDb::new(); - let keypair = sr25519_fixture(); + let (keypair, _) = random_keypair(&mut OsRng); let cosign = Cosign { global_session: [1u8; 32], @@ -518,9 +521,8 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_valid_cosign() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -543,9 +545,8 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_stale_cosign() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -582,7 +583,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_unrecognized_global_session() { - let keypair = sr25519_fixture(); + let (keypair, _) = random_keypair(&mut OsRng); let mut db = MemDb::new(); let block_number = 1; @@ -610,17 +611,16 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_before_global_session_start() { - let mut session = session_fixture(); + let (mut session, keypair) = random_session(); session.start_block_number = 10; let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); { let mut txn = db.txn(); GlobalSessions::set(&mut txn, id, &session.to_global()); CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestCosignedBlockNumber::set(&mut txn, &10u64); + LatestAcknowledgedBlock::set(&mut txn, &10u64); SubstrateBlockHash::set(&mut txn, 5, &BlockHash([5u8; 32])); txn.commit(); @@ -643,9 +643,8 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_after_global_session_end() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -676,10 +675,10 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_invalid_signature() { - let session = session_fixture(); + let (session, _keypair) = random_session(); let id = session.id(); - // Use a different keypair than the one in session_fixture - let wrong_keypair = Sr25519Fixture { seed: [99u8; 32] }; + // Use a different keypair than the one in the session + let (wrong_keypair, _) = random_keypair(&mut OsRng); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -702,10 +701,9 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_future_global_session() { - let mut session = session_fixture(); + let (mut session, keypair) = random_session(); session.start_block_number = 10; let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); { @@ -713,7 +711,7 @@ mod intake_cosign { GlobalSessions::set(&mut txn, id, &session.to_global()); CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestCosignedBlockNumber::set(&mut txn, &5u64); + LatestAcknowledgedBlock::set(&mut txn, &5u64); SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); txn.commit(); } @@ -735,9 +733,8 @@ mod intake_cosign { #[test] fn intake_cosign_handles_faulty_cosign() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -774,9 +771,8 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -815,9 +811,8 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_cosign_at_global_session_last_block() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -849,9 +844,8 @@ mod intake_cosign { #[test] fn intake_cosign_ignores_duplicate_fault_from_same_network() { - let session = session_fixture(); + let (session, keypair) = random_session(); let id = session.id(); - let keypair = sr25519_fixture(); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -902,10 +896,10 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_non_participating_network() { - let session = session_fixture(); + let (session, _keypair) = random_session(); let id = session.id(); - let eth_keypair = Sr25519Fixture { seed: [77u8; 32] }; + let (eth_keypair, _) = random_keypair(&mut OsRng); let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); @@ -940,14 +934,14 @@ mod intake_cosign { let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; let set2 = ExternalValidatorSet { network: network2, session: Session(0) }; - let keypair1 = sr25519_fixture(); - let keypair2 = Sr25519Fixture { seed: [88u8; 32] }; + let (keypair1, public1) = random_keypair(&mut OsRng); + let (_, public2) = random_keypair(&mut OsRng); let mut keys = HashMap::new(); let mut stakes = HashMap::new(); - keys.insert(network1, Public(keypair1.public_bytes())); - keys.insert(network2, Public(keypair2.public_bytes())); + keys.insert(network1, public1); + keys.insert(network2, public2); stakes.insert(network1, 10); stakes.insert(network2, 90); diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 7bdead847..a0c9c6af1 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,8 +1,12 @@ -use std::time::Instant; +use std::time::{Duration, Instant}; + +use rand::RngCore; +use rand_core::OsRng; +use serai_task::ContinuallyRan; use crate::{ - LatestCosignedBlockNumber, - delay::{CosignDelayTask, now_timestamp}, + LatestAcknowledgedBlock, + delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, tests::{IntoTask, TaskTest}, }; @@ -38,8 +42,8 @@ impl DelayTest { (Self::default(), start) } - async fn assert_task_iteration_completes_with(&self, latest_cosigned_block_number: u64) { - assert_eq!(LatestCosignedBlockNumber::get(&self.db), Some(latest_cosigned_block_number)); + async fn assert_task_iteration_completes_with(&self, latest_acknowledged_block: u64) { + assert_eq!(LatestAcknowledgedBlock::get(&self.db), Some(latest_acknowledged_block)); // Assert CosignedBlocks queue items have been consumed after task run assert_eq!(CosignedBlocks::peek(&self.db), None); } @@ -52,12 +56,12 @@ async fn returns_false_with_no_messages() { TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - assert_eq!(LatestCosignedBlockNumber::get(&test.db), None); + assert_eq!(LatestAcknowledgedBlock::get(&test.db), None); assert_eq!(CosignedBlocks::peek(&test.db), None); } #[tokio::test] -async fn updates_latest_cosigned_block_number_after_ack_delay() { +async fn updates_latest_acknowledged_block_after_ack_delay() { let (mut test, start) = DelayTest::new(); { @@ -150,3 +154,40 @@ async fn does_not_regress_and_skips_if_not_a_later_block() { TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; test.assert_task_iteration_completes_with(4).await; } + +#[tokio::test] +async fn respects_acknowledgement_delay() { + let mut test = DelayTest::default(); + let block_number = OsRng.next_u64(); + + let now = now_secs(); + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(block_number, now)); + txn.commit(); + } + + let start = Instant::now(); + let mut task = test.into_task(); + + // Run the task in the background (it will sleep internally for ACKNOWLEDGEMENT_DELAY) + let task_handle = tokio::spawn(async move { task.run_iteration().await }); + + // Well before ACKNOWLEDGEMENT_DELAY, the block must not be acknowledged + tokio::time::sleep(Duration::from_secs(ACKNOWLEDGEMENT_DELAY.as_secs().saturating_sub(2))).await; + assert!(LatestAcknowledgedBlock::get(&test.db).is_none()); + + // Wait for the task to complete + let made_progress = task_handle.await.unwrap().unwrap(); + assert!(made_progress); + + // Block is now acknowledged + assert_eq!(LatestAcknowledgedBlock::get(&test.db), Some(block_number)); + + // The elapsed time must be at least ACKNOWLEDGEMENT_DELAY + let elapsed = start.elapsed(); + assert!( + elapsed >= ACKNOWLEDGEMENT_DELAY, + "completed in {elapsed:?}, expected at least {ACKNOWLEDGEMENT_DELAY:?}" + ); +} diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index b77a6a67d..fc9a6ce03 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -18,7 +18,8 @@ use serai_task::ContinuallyRan; use crate::{ Cosign, GlobalSession, HasEvents, NetworksLatestCosignedBlock, evaluator::{ - CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, + CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, LatestEvaluatedBlock, + REQUEST_COSIGNS_SPACING, }, intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, tests::{IntoTask, TaskTest, TestRequest}, @@ -133,7 +134,13 @@ async fn processes_blocks_with_no_events() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((0, 2))); + + assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); + assert!( + CosignedBlocks::peek(&test.db).is_none(), + "HasEvent::No blocks shouldn't produce CosignedBlocks" + ); + assert_eq!(LatestEvaluatedBlock::get(&test.db), Some(2)); } #[tokio::test] @@ -324,7 +331,13 @@ async fn advances_global_session_at_start_block() { let mut task = test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((1, 3))); + + assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); + assert!( + CosignedBlocks::peek(&test.db).is_none(), + "HasEvent::No blocks shouldn't produce CosignedBlocks" + ); + assert_eq!(LatestEvaluatedBlock::get(&test.db), Some(3)); let current = CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); diff --git a/coordinator/cosign/src/tests/full_stack.rs b/coordinator/cosign/src/tests/full_stack.rs new file mode 100644 index 000000000..1dd06c9fc --- /dev/null +++ b/coordinator/cosign/src/tests/full_stack.rs @@ -0,0 +1,125 @@ +use std::{collections::HashSet, time::Duration}; + +use serai_db::{Db as _, DbTxn, MemDb}; + +use serai_cosign_types::tests::sign_cosign; + +use serai_client_serai::abi::primitives::{ + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, +}; + +use crate::{ + CosignIntent, Cosigning, GlobalSessions, IntakeCosignError, + tests::{TestRequest, setup_shim_serai}, +}; + +#[tokio::test] +async fn full_stack_fuzzed() { + use super::intend::EventFuzzer; + + let _ = env_logger::try_init(); + + let num_blocks = 20; + let mut fuzzer = EventFuzzer::new(); + let blocks = fuzzer.generate_blocks(num_blocks); + + serai_log::log::info!( + "Full-stack fuzz: {} blocks, {} validators, seed={}", + num_blocks, + fuzzer.validators.len(), + hex::encode(fuzzer.seed), + ); + + let (shim, serai) = setup_shim_serai().await; + for (i, events) in blocks.into_iter().enumerate() { + shim.make_block(u64::try_from(i).unwrap(), events).await; + } + + let mut db = MemDb::new(); + + let (request, _calls) = TestRequest::new(false); + let mut cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + let target = u64::try_from(num_blocks - 1).unwrap(); + + // Buffer for intents whose cosigns were rejected as FutureGlobalSession. + // These are retried each iteration until the delay task catches up. + let mut pending_intents: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); + let mut seen_global_sessions: HashSet<[u8; 32]> = HashSet::new(); + + let deadline = tokio::time::Instant::now() + Duration::from_secs(300); + + loop { + assert!( + tokio::time::Instant::now() < deadline, + "timed out waiting for all blocks to be cosigned (target={target}, \ + latest={:?}, pending_intents={})", + Cosigning::::latest_acknowledged_block(&db), + pending_intents.len(), + ); + + // Drain new intended cosigns for all validator sets that have had SetKeys + { + let mut txn = db.txn(); + for network in ExternalNetworkId::all() { + let max_session = fuzzer.next_session.get(&network).copied().unwrap_or(0); + for session_num in 0 .. max_session { + let set = ExternalValidatorSet { network, session: Session(session_num) }; + let intents = Cosigning::::intended_cosigns(&mut txn, set); + for intent in intents { + seen_global_sessions.insert(intent.global_session); + pending_intents.push((network, intent)); + } + } + } + txn.commit(); + } + + // Try to intake all pending intents, keeping those that fail with temporal errors + let mut still_pending = Vec::new(); + for (network, intent) in pending_intents.drain(..) { + let cosign = intent.into_cosign(network); + let Some(gs) = GlobalSessions::get(&db, intent.global_session) else { + still_pending.push((network, intent)); + continue; + }; + let Some(public) = gs.keys.get(&network) else { continue }; + let Some(keypair) = fuzzer.keypairs.get(&public.0) else { continue }; + let signed = sign_cosign(cosign, keypair); + match cosigning.intake_cosign(&signed) { + Ok(()) => {} + Err(IntakeCosignError::FutureGlobalSession) | + Err(IntakeCosignError::UnrecognizedGlobalSession) | + Err(IntakeCosignError::NotYetIndexedBlock) => { + still_pending.push((network, intent)); + } + // StaleCosign means a newer cosign already exists; safe to drop + Err(IntakeCosignError::StaleCosign) => {} + Err(ref e) => { + serai_log::log::warn!( + "intake_cosign dropped: block={}, network={:?}, err={:?}", + intent.block_number, + network, + e, + ); + } + } + } + pending_intents = still_pending; + + match Cosigning::::latest_acknowledged_block(&db) { + Ok(n) if n >= target => break, + _ => {} + } + + tokio::time::sleep(Duration::from_millis(50)).await; + } + + let latest = Cosigning::::latest_acknowledged_block(&db).unwrap(); + assert!(latest >= target, "expected latest cosigned block >= {target}, got {latest}"); + + serai_log::log::info!("Full-stack fuzz completed: all {num_blocks} blocks cosigned"); + + let session_ids: Vec<[u8; 32]> = seen_global_sessions.into_iter().collect(); +} diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index a6a709dd9..49d3259ea 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -11,10 +11,10 @@ use serai_client_serai::{ abi::{ Event, coins, primitives::{ - address::{ExternalAddress, SeraiAddress}, + address::SeraiAddress, balance::{Amount, ExternalBalance}, coin::ExternalCoin, - crypto::{ExternalKey, KeyPair, Public}, + crypto::{ExternalKey, KeyPair}, instructions::{OutInstruction, OutInstructionWithBalance}, network_id::{ExternalNetworkId, NetworkId}, validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, @@ -22,14 +22,18 @@ use serai_client_serai::{ validator_sets, }, }; +use serai_primitives::test_helpers::{random_external_address, random_keypair}; use crate::{intend::*, tests::*, *}; -fn set_decided_event(set: ValidatorSet, validators: Vec<(SeraiAddress, KeyShares)>) -> Event { +pub(super) fn set_decided_event( + set: ValidatorSet, + validators: Vec<(SeraiAddress, KeyShares)>, +) -> Event { Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) } -fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { +pub(super) fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { Event::ValidatorSets(validator_sets::Event::Allocation { validator, network, @@ -46,13 +50,11 @@ fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) }) } -fn burn_with_instruction_event(from: SeraiAddress) -> Event { +pub(super) fn burn_with_instruction_event(from: SeraiAddress) -> Event { Event::Coins(coins::Event::BurnWithInstruction { from, instruction: OutInstructionWithBalance { - instruction: OutInstruction::Transfer( - ExternalAddress::try_from(vec![1u8, 2u8, 3u8]).unwrap(), - ), + instruction: OutInstruction::Transfer(random_external_address(&mut OsRng)), balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1) }, }, }) @@ -331,11 +333,11 @@ mod errors { } /// Random event, state, and block generator. -struct EventFuzzer { +pub(super) struct EventFuzzer { /// Seed bytes. - seed: [u8; 32], + pub(super) seed: [u8; 32], /// Available validator addresses. - validators: Vec, + pub(super) validators: Vec, /// All networks. networks: Vec, /// Running stake ledger: `(network, validator) -> accumulated_stake`. @@ -343,11 +345,13 @@ struct EventFuzzer { /// Sets that have been decided but not yet keyed. pending_keys: HashMap>, /// Next session number per network. - next_session: HashMap, + pub(super) next_session: HashMap, + /// Keypairs indexed by public key bytes, for signing cosigns. + pub(super) keypairs: HashMap<[u8; 32], schnorrkel::Keypair>, } impl EventFuzzer { - fn new() -> Self { + pub(super) fn new() -> Self { let mut seed = [0u8; 32]; OsRng.fill_bytes(&mut seed); @@ -371,6 +375,7 @@ impl EventFuzzer { stakes: HashMap::new(), pending_keys: HashMap::new(), next_session: HashMap::new(), + keypairs: HashMap::new(), } } @@ -380,11 +385,23 @@ impl EventFuzzer { &slice[usize::try_from(i).unwrap()] } + /// Generate a random amount using a weighted distribution: + /// ~25% tiny (1..=10), ~35% small (11..=1_000), ~25% medium (1_001..=100_000), + /// ~15% large (100_001..=10_000_000). + fn random_amount(&mut self) -> u64 { + match OsRng.next_u64() % 20 { + 0 ..= 4 => (OsRng.next_u64() % 10) + 1, + 5 ..= 11 => (OsRng.next_u64() % 990) + 11, + 12 ..= 16 => (OsRng.next_u64() % 99_000) + 1_001, + _ => (OsRng.next_u64() % 9_900_000) + 100_001, + } + } + /// Generate a random allocation event. fn random_allocation(&mut self) -> Event { let validator = *self.pick(&self.validators.clone()); let network = *self.pick(&self.networks.clone()); - let amount = (OsRng.next_u64() % 10000) + 1; // 1..=10000 + let amount = self.random_amount(); if let Ok(ext) = ExternalNetworkId::try_from(network) { *self.stakes.entry((ext, validator)).or_default() += amount; } @@ -396,7 +413,7 @@ impl EventFuzzer { // ~25% chance of generating a Serai deallocation (exercises the `continue` branch) if OsRng.next_u64() % 4 == 0 { let validator = *self.pick(&self.validators.clone()); - let amount = (OsRng.next_u64() % 100) + 1; + let amount = self.random_amount(); return Some(deallocation_event(validator, NetworkId::Serai, amount)); } @@ -407,7 +424,8 @@ impl EventFuzzer { } let i = OsRng.next_u64() % u64::try_from(candidates.len()).unwrap(); let ((network, validator), current_stake) = candidates[usize::try_from(i).unwrap()]; - let amount = (OsRng.next_u64() % current_stake) + 1; // 1..=current_stake + // Use weighted amount, clamped to current_stake so we don't underflow + let amount = self.random_amount().min(current_stake).max(1); *self.stakes.entry((network, validator)).or_default() -= amount; Some(deallocation_event(validator, NetworkId::External(network), amount)) } @@ -466,8 +484,8 @@ impl EventFuzzer { // Advance session for this network so the next SetDecided gets session+1 *self.next_session.entry(set.network).or_insert(0) += 1; - let mut public = Public([0u8; 32]); - public.0[0 .. 8].copy_from_slice(&OsRng.next_u64().to_le_bytes()); + let (keypair, public) = random_keypair(&mut OsRng); + self.keypairs.insert(public.0, keypair); let external_key = ExternalKey(vec![1u8].try_into().unwrap()); let key_pair = KeyPair(public, external_key); @@ -550,7 +568,7 @@ impl EventFuzzer { } /// Generate multiple blocks of random events. - fn generate_blocks(&mut self, count: usize) -> Vec>> { + pub(super) fn generate_blocks(&mut self, count: usize) -> Vec>> { let mut blocks = Vec::with_capacity(count); for _ in 0 .. count { blocks.push(self.generate_block_events()); diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index e306f4e54..37413c0c4 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -10,6 +10,9 @@ mod delay; #[cfg(test)] mod cosigning; +#[cfg(test)] +mod full_stack; + use std::{ sync::{ Arc, diff --git a/coordinator/cosign/types/Cargo.toml b/coordinator/cosign/types/Cargo.toml index 4ef4f3557..30d3a5925 100644 --- a/coordinator/cosign/types/Cargo.toml +++ b/coordinator/cosign/types/Cargo.toml @@ -18,7 +18,7 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [features] -test-helpers = [] +test-helpers = ["serai-primitives/test-helpers"] [dependencies] schnorrkel = { version = "0.11", default-features = false, features = ["std"] } @@ -26,3 +26,7 @@ schnorrkel = { version = "0.11", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] } + +[dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } +serai-primitives = { path = "../../../substrate/primitives", features = ["test-helpers"] } diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 5ce65d3a3..6125f3e7c 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -1,44 +1,19 @@ use crate::{COSIGN_CONTEXT, Cosign, SignedCosign}; -#[cfg(test)] -use crate::{BlockHash, CosignIntent, ExternalNetworkId, Public}; - -fn sr25519_fixture_from_seed(seed: [u8; 32]) -> schnorrkel::Keypair { - schnorrkel::MiniSecretKey::from_bytes(&seed) - .expect("seed should be valid") - .expand_to_keypair(schnorrkel::ExpansionMode::Ed25519) -} - -fn sr25519_fixture() -> schnorrkel::Keypair { - sr25519_fixture_from_seed([0xff; 32]) -} - -fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { +/// Sign a [`Cosign`] with a schnorrkel keypair, producing a [`SignedCosign`]. +pub fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosign { SignedCosign { cosign: cosign.clone(), signature: keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes(), } } -/// Returns the public key bytes from the test fixture keypair (seed [0xff; 32]) -pub fn fixture_public_key() -> [u8; 32] { - sr25519_fixture().public.to_bytes() -} - -/// Returns the public key bytes for a keypair with the given seed -pub fn public_key_from_seed(seed: [u8; 32]) -> [u8; 32] { - sr25519_fixture_from_seed(seed).public.to_bytes() -} - -/// Creates a SignedCosign using the test fixture keypair (seed [0xff; 32]) -pub fn sign_cosign_with_fixture(cosign: Cosign) -> SignedCosign { - sign_cosign(cosign, &sr25519_fixture()) -} - -/// Creates a SignedCosign using a keypair derived from the given seed -pub fn sign_cosign_with_seed(cosign: Cosign, seed: [u8; 32]) -> SignedCosign { - sign_cosign(cosign, &sr25519_fixture_from_seed(seed)) -} +#[cfg(test)] +use rand_core::OsRng; +#[cfg(test)] +use serai_primitives::test_helpers::random_keypair; +#[cfg(test)] +use crate::{BlockHash, CosignIntent, ExternalNetworkId, Public}; #[test] fn cosign_intent_into_cosign() { @@ -74,7 +49,7 @@ fn deterministic_signature_message() { #[test] fn signed_cosign_verify_signature_valid() { - let keypair = sr25519_fixture(); + let (keypair, public) = random_keypair(&mut OsRng); let cosign = Cosign { global_session: [1u8; 32], block_number: 5, @@ -83,14 +58,14 @@ fn signed_cosign_verify_signature_valid() { }; let signed = sign_cosign(cosign, &keypair); - let pubkey = Public(keypair.public.to_bytes()); - assert!(signed.verify_signature(pubkey), "valid signature should verify"); + assert!(signed.verify_signature(public), "valid signature should verify"); } #[test] fn signed_cosign_verify_signature_invalid() { - let keypair1 = sr25519_fixture(); + let (keypair1, _) = random_keypair(&mut OsRng); + let (_, wrong_public) = random_keypair(&mut OsRng); let cosign = Cosign { global_session: [1u8; 32], @@ -100,14 +75,13 @@ fn signed_cosign_verify_signature_invalid() { }; let signed = sign_cosign(cosign, &keypair1); - let wrong_pubkey = public_key_from_seed([0x01; 32]); - assert!(!signed.verify_signature(wrong_pubkey), "invalid signature should not verify"); + assert!(!signed.verify_signature(wrong_public), "invalid signature should not verify"); } #[test] fn signed_cosign_verify_signature_invalid_public_key_bytes() { - let keypair = sr25519_fixture(); + let (keypair, _) = random_keypair(&mut OsRng); let cosign = Cosign { global_session: [1u8; 32], block_number: 5, @@ -144,8 +118,7 @@ fn signed_cosign_verify_signature_invalid_signature_bytes() { let signed = SignedCosign { cosign, signature: invalid_sig_bytes }; - let keypair = sr25519_fixture(); - let valid_pubkey = Public(keypair.public.to_bytes()); + let (_, valid_public) = random_keypair(&mut OsRng); - assert!(!signed.verify_signature(valid_pubkey), "invalid signature bytes should return false"); + assert!(!signed.verify_signature(valid_public), "invalid signature bytes should return false"); } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3a1bbc2d7..6d6784934 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -91,8 +91,8 @@ fn spawn_cosigning( let last_cosign_rebroadcast = Instant::now(); loop { // Intake our own cosigns - match Cosigning::::latest_cosigned_block_number(&db) { - Ok(latest_cosigned_block_number) => { + match Cosigning::::latest_acknowledged_block(&db) { + Ok(latest_acknowledged_block) => { let mut txn = db.txn(); // The cosigns we prior tried to intake yet failed to let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]); @@ -104,7 +104,7 @@ fn spawn_cosigning( let mut erroneous = vec![]; for cosign in cosigns { // If this cosign is stale, move on - if cosign.cosign.block_number <= latest_cosigned_block_number { + if cosign.cosign.block_number <= latest_acknowledged_block { continue; } diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs index 083554570..040e6ff85 100644 --- a/coordinator/src/tributary.rs +++ b/coordinator/src/tributary.rs @@ -103,7 +103,7 @@ impl ContinuallyRan pending_notable_cosign = cosign.notable; // If we (Serai) haven't cosigned this block, break as this is still pending - let latest = match Cosigning::::latest_cosigned_block_number(&txn) { + let latest = match Cosigning::::latest_acknowledged_block(&txn) { Ok(latest) => latest, Err(Faulted) => { log::error!("cosigning faulted"); diff --git a/coordinator/substrate/src/canonical.rs b/coordinator/substrate/src/canonical.rs index 02f5069b0..5a24873b0 100644 --- a/coordinator/substrate/src/canonical.rs +++ b/coordinator/substrate/src/canonical.rs @@ -44,7 +44,7 @@ impl ContinuallyRan for CanonicalEventStream { async move { let next_block = NextBlock::get(&self.db).unwrap_or(0); let latest_finalized_block = - Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?; + Cosigning::::latest_acknowledged_block(&self.db).map_err(|e| format!("{e:?}"))?; // These are all the events which generate canonical messages struct CanonicalEvents { diff --git a/coordinator/substrate/src/ephemeral.rs b/coordinator/substrate/src/ephemeral.rs index dc2de117a..b94aca661 100644 --- a/coordinator/substrate/src/ephemeral.rs +++ b/coordinator/substrate/src/ephemeral.rs @@ -54,7 +54,7 @@ impl ContinuallyRan for EphemeralEventStream { async move { let next_block = NextBlock::get(&self.db).unwrap_or(0); let latest_finalized_block = - Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?; + Cosigning::::latest_acknowledged_block(&self.db).map_err(|e| format!("{e:?}"))?; // These are all the events which generate canonical messages struct EphemeralEvents { diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 883a12b3c..06535bca3 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -42,6 +42,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] } bech32 = { version = "0.11", default-features = false, features = ["alloc"] } [features] +test-helpers = ["std"] std = [ "rand_core/std", diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 12b56fe17..18816fd99 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -53,6 +53,10 @@ pub mod instructions; /// Merkle trees. pub mod merkle; +#[cfg(any(test, feature = "test-helpers"))] +/// Test helpers for generating random instances of primitive types. +pub mod test_helpers; + /// The type used to identify block numbers. /// /// A block's number is its zero-indexed position on the list of blocks which form a blockchain. diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs new file mode 100644 index 000000000..f80e7a5bc --- /dev/null +++ b/substrate/primitives/src/test_helpers.rs @@ -0,0 +1,40 @@ +//! Test helpers for generating random instances of primitive types. + +use rand_core::{RngCore, CryptoRng}; + +use crate::{BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::Public}; + +/// Generate a random [`ExternalAddress`]. +pub fn random_external_address(rng: &mut R) -> ExternalAddress { + let mut key = [0; 32]; + rng.fill_bytes(&mut key); + ExternalAddress::try_from(key.to_vec()).unwrap() +} + +/// Generate a random [`SeraiAddress`]. +pub fn random_serai_address(rng: &mut R) -> SeraiAddress { + let mut key = [0; 32]; + rng.fill_bytes(&mut key); + SeraiAddress(key) +} + +/// Generate a random [`Public`]. +pub fn random_public(rng: &mut R) -> Public { + let mut key = [0; 32]; + rng.fill_bytes(&mut key); + Public(key) +} + +/// Generate a random schnorrkel keypair and its [`Public`] wrapper. +pub fn random_keypair(rng: &mut R) -> (schnorrkel::Keypair, Public) { + let keypair = schnorrkel::Keypair::generate_with(rng); + let public = Public(keypair.public.to_bytes()); + (keypair, public) +} + +/// Generate a random [`BlockHash`]. +pub fn random_block_hash(rng: &mut R) -> BlockHash { + let mut hash = [0; 32]; + rng.fill_bytes(&mut hash); + BlockHash(hash) +} From 836eb44d7e81a6a103a322d94c07ec529528912a Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 16 Mar 2026 14:43:12 -0300 Subject: [PATCH 34/71] feat(env): move serai_log into serai_env --- common/env/Cargo.toml | 4 ++ common/env/src/lib.rs | 87 ++++++++++++++++++++++ common/log/Cargo.toml | 21 ------ common/log/LICENSE | 21 ------ common/log/build.rs | 3 - common/log/src/lib.rs | 64 ----------------- coordinator/Cargo.toml | 1 - coordinator/cosign/Cargo.toml | 3 +- coordinator/cosign/src/delay.rs | 8 +-- coordinator/cosign/src/evaluator.rs | 6 +- coordinator/cosign/src/intend.rs | 16 ++--- coordinator/cosign/src/tests/cosigning.rs | 36 ++++++++++ coordinator/cosign/src/tests/delay.rs | 20 ++++-- coordinator/cosign/src/tests/evaluator.rs | 88 +++++++++++++++++------ coordinator/cosign/src/tests/intend.rs | 74 +++++++++++++------ coordinator/cosign/src/tests/mod.rs | 15 ++-- coordinator/src/main.rs | 7 +- message-queue/Cargo.toml | 1 - message-queue/src/main.rs | 7 +- networks/ethereum/relayer/Cargo.toml | 1 - networks/ethereum/relayer/src/main.rs | 7 +- processor/bin/Cargo.toml | 1 - processor/bin/src/lib.rs | 7 +- tests/task/Cargo.toml | 2 +- tests/task/src/lib.rs | 4 +- 25 files changed, 291 insertions(+), 213 deletions(-) delete mode 100644 common/log/Cargo.toml delete mode 100644 common/log/LICENSE delete mode 100644 common/log/build.rs delete mode 100644 common/log/src/lib.rs diff --git a/common/env/Cargo.toml b/common/env/Cargo.toml index e88ef1bcf..85811cd1d 100644 --- a/common/env/Cargo.toml +++ b/common/env/Cargo.toml @@ -15,3 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true + +[dependencies] +env_logger = { version = "0.10", default-features = false } +log = { version = "0.4", default-features = false, features = ["std"] } diff --git a/common/env/src/lib.rs b/common/env/src/lib.rs index 90fef7f13..bdb5bba6a 100644 --- a/common/env/src/lib.rs +++ b/common/env/src/lib.rs @@ -1,9 +1,96 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] +use std::str::FromStr; + +/// Re-export of `log` for direct access (e.g. `serai_env::log::Level`). +pub use log; + // Obtain a variable from the Serai environment/secret store. pub fn var(variable: &str) -> Option { // TODO: Move this to a proper secret store // TODO: Unset this variable std::env::var(variable).ok() } + +pub fn init_logger() { + env_logger::builder() + .filter_level( + log::LevelFilter::from_str(&var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) + .expect("`RUST_LOG` environment variable had an invalid filter"), + ) + .try_init() + .ok(); +} + +/// Coverage-gated `trace!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! trace { + ($($arg:tt)+) => { $crate::log::trace!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! trace { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `debug!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! debug { + ($($arg:tt)+) => { $crate::log::debug!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! debug { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `info!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! info { + ($($arg:tt)+) => { $crate::log::info!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! info { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `warn!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! warn { + ($($arg:tt)+) => { $crate::log::warn!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! warn { + ($($arg:tt)+) => {}; +} + +/// Coverage-gated `error!`. Compiles to nothing under `cfg(coverage)`. +#[cfg(not(coverage))] +#[macro_export] +macro_rules! error { + ($($arg:tt)+) => { $crate::log::error!($($arg)+) }; +} +#[cfg(coverage)] +#[macro_export] +macro_rules! error { + ($($arg:tt)+) => {}; +} + +/// `info!` in production, `debug!` in tests. +/// Use for operational logging that is useful in production but noisy during testing. +#[macro_export] +macro_rules! prod_info { + ($($arg:tt)+) => {{ + #[cfg(not(test))] + { $crate::log::info!($($arg)+) } + #[cfg(test)] + { $crate::log::debug!($($arg)+) } + }}; +} diff --git a/common/log/Cargo.toml b/common/log/Cargo.toml deleted file mode 100644 index 6d65fcf51..000000000 --- a/common/log/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "serai-log" -version = "0.1.0" -description = "Coverage-gated logging macros for the Serai project" -license = "MIT" -repository = "https://github.com/serai-dex/serai/tree/develop/common/log" -authors = ["Luke Parker ", "rafael_xmr "] -keywords = [] -edition = "2021" -publish = false -rust-version = "1.85" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -log = { version = "0.4", default-features = false, features = ["std"] } diff --git a/common/log/LICENSE b/common/log/LICENSE deleted file mode 100644 index f995f1e78..000000000 --- a/common/log/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2026 Serai - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/common/log/build.rs b/common/log/build.rs deleted file mode 100644 index 040cbf090..000000000 --- a/common/log/build.rs +++ /dev/null @@ -1,3 +0,0 @@ -fn main() { - println!("cargo::rustc-check-cfg=cfg(coverage)"); -} diff --git a/common/log/src/lib.rs b/common/log/src/lib.rs deleted file mode 100644 index fa91fbb73..000000000 --- a/common/log/src/lib.rs +++ /dev/null @@ -1,64 +0,0 @@ -#![cfg_attr(docsrs, feature(doc_cfg))] - -/// Re-export of `log` for direct access (e.g. `serai_log::log::Level`). -pub use log; - -/// Coverage-gated `trace!`. Compiles to nothing under `cfg(coverage)`. -#[cfg(not(coverage))] -#[macro_export] -macro_rules! trace { - ($($arg:tt)+) => { $crate::log::trace!($($arg)+) }; -} -#[cfg(coverage)] -#[macro_export] -macro_rules! trace { - ($($arg:tt)+) => {}; -} - -/// Coverage-gated `debug!`. Compiles to nothing under `cfg(coverage)`. -#[cfg(not(coverage))] -#[macro_export] -macro_rules! debug { - ($($arg:tt)+) => { $crate::log::debug!($($arg)+) }; -} -#[cfg(coverage)] -#[macro_export] -macro_rules! debug { - ($($arg:tt)+) => {}; -} - -/// Coverage-gated `info!`. Compiles to nothing under `cfg(coverage)`. -#[cfg(not(coverage))] -#[macro_export] -macro_rules! info { - ($($arg:tt)+) => { $crate::log::info!($($arg)+) }; -} -#[cfg(coverage)] -#[macro_export] -macro_rules! info { - ($($arg:tt)+) => {}; -} - -/// Coverage-gated `warn!`. Compiles to nothing under `cfg(coverage)`. -#[cfg(not(coverage))] -#[macro_export] -macro_rules! warn { - ($($arg:tt)+) => { $crate::log::warn!($($arg)+) }; -} -#[cfg(coverage)] -#[macro_export] -macro_rules! warn { - ($($arg:tt)+) => {}; -} - -/// Coverage-gated `error!`. Compiles to nothing under `cfg(coverage)`. -#[cfg(not(coverage))] -#[macro_export] -macro_rules! error { - ($($arg:tt)+) => { $crate::log::error!($($arg)+) }; -} -#[cfg(coverage)] -#[macro_export] -macro_rules! error { - ($($arg:tt)+) => {}; -} diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 925257860..fe1b76d05 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -45,7 +45,6 @@ tributary-sdk = { path = "./tributary-sdk" } serai-client-serai = { path = "../substrate/client/serai", default-features = false } log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] } diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 55fca6c96..c7f48bbbb 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -23,7 +23,7 @@ blake2 = { version = "0.11.0-rc.5", default-features = false, features = ["alloc borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-client-serai = { path = "../../substrate/client/serai", default-features = false } -serai-log = { path = "../../common/log", version = "0.1.0" } +serai-env = { path = "../../common/env", version = "0.1.0" } tokio = { version = "1", default-features = false } @@ -33,7 +33,6 @@ serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } [dev-dependencies] -env_logger = { version = "0.10", default-features = false } serai-cosign-types = { path = "./types", features = ["test-helpers"] } serai-shim-rpc = { path = "../../tests/shim-rpc" } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 9c6ca655b..33d3521d5 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -54,7 +54,7 @@ impl ContinuallyRan for CosignDelayTask { let latest_acknowledged_block = LatestAcknowledgedBlock::get(&mut txn).unwrap_or(0); - serai_log::debug!( + serai_env::debug!( "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_acknowledged_block={latest_acknowledged_block}", ); @@ -74,7 +74,7 @@ impl ContinuallyRan for CosignDelayTask { if time_valid_timestamp > now_timestamp { // Sleep until then let time_left = time_valid_timestamp - now_timestamp; - serai_log::debug!("beginning sleep: {time_left}s"); + serai_env::debug!("beginning sleep: {time_left}s"); tokio::time::sleep(Duration::from_secs(time_left)).await; } @@ -84,7 +84,7 @@ impl ContinuallyRan for CosignDelayTask { LatestAcknowledgedBlock::set(&mut txn, &block_number); txn.commit(); - serai_log::debug!("LatestAcknowledgedBlock={block_number}"); + serai_env::debug!("LatestAcknowledgedBlock={block_number}"); made_progress = true; } @@ -98,7 +98,7 @@ impl ContinuallyRan for CosignDelayTask { let mut txn = self.db.txn(); LatestAcknowledgedBlock::set(&mut txn, &evaluated); txn.commit(); - serai_log::debug!("LatestAcknowledgedBlock={evaluated} (caught up to evaluator)"); + serai_env::debug!("LatestAcknowledgedBlock={evaluated} (caught up to evaluator)"); made_progress = true; } } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index c01e8342f..6e3d67e80 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -216,7 +216,7 @@ impl ContinuallyRan for CosignEvaluatorT } } - serai_log::log::debug!( + serai_env::log::debug!( "beginning evaluator: block_number={block_number}, has_events={:#?}", has_events ); @@ -313,10 +313,10 @@ impl ContinuallyRan for CosignEvaluatorT // Roughly ~1 hour, no need for repetitive logging #[cfg(not(test))] if (block_number % 500) == 0 { - serai_log::debug!("marking block #{block_number} as cosigned"); + serai_env::debug!("marking block #{block_number} as cosigned"); } #[cfg(test)] - serai_log::info!("marking block #{block_number} as cosigned"); + serai_env::debug!("marking block #{block_number} as cosigned"); made_progress = true; } diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index c6eb378a1..7fe2bc248 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -23,7 +23,7 @@ use serai_task::ContinuallyRan; use crate::*; -#[derive(BorshSerialize, BorshDeserialize)] +#[derive(Debug, BorshSerialize, BorshDeserialize)] pub(crate) struct Set { pub(crate) session: Session, pub(crate) key: Public, @@ -89,7 +89,7 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - serai_log::debug!( + serai_env::debug!( "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" ); @@ -148,7 +148,7 @@ impl ContinuallyRan for CosignIntendTask { ); BuildsUpon::set(&mut txn, &builds_upon); - serai_log::debug!("iterating over block_number={block_number}"); + serai_env::debug!("iterating over block_number={block_number}"); let mut has_events = HasEvents::No; let vset_events = serai_block_events.validator_sets(); @@ -237,7 +237,7 @@ impl ContinuallyRan for CosignIntendTask { &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, ); } else { - serai_log::debug!( + serai_env::debug!( "skipped session {:?} with 0 stake from being selected for cosigns", set.session ); @@ -253,7 +253,7 @@ impl ContinuallyRan for CosignIntendTask { let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); - serai_log::debug!("type of has_events={has_events:?}"); + serai_env::debug!("type of has_events={has_events:?}"); // If this is notable, it creates a new global session, which we index into the database // now @@ -293,7 +293,7 @@ impl ContinuallyRan for CosignIntendTask { total_stake, }; - serai_log::debug!( + serai_env::debug!( "Notable block block_number={block_number}: new session created {next_global_session_info:?}" ); @@ -326,7 +326,7 @@ impl ContinuallyRan for CosignIntendTask { // Tell each set of their expectation to cosign this block for set in ending_global_session_info.sets { - serai_log::info!( + serai_env::prod_info!( "set will cosign {has_events:?} block: set={set:?}, block_number={block_number}" ); @@ -345,7 +345,7 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - serai_log::debug!( + serai_env::debug!( "finished iterating block_number={block_number}: has_events={has_events:?}" ); diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index 9aad1cdec..ee5444b13 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -86,6 +86,7 @@ fn seed_minimal_state(db: &mut MemDb, session: &TestGlobalSession) { #[test] fn global_session_id_generation() { + serai_env::init_logger(); let network1 = ExternalNetworkId::Bitcoin; let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; let set2 = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; @@ -119,6 +120,7 @@ mod intake_cosign_error { #[test] fn temporal_returns_true_for_temporal_errors() { + serai_env::init_logger(); assert!(IntakeCosignError::NotYetIndexedBlock.temporal()); assert!(IntakeCosignError::StaleCosign.temporal()); assert!(IntakeCosignError::UnrecognizedGlobalSession.temporal()); @@ -127,6 +129,7 @@ mod intake_cosign_error { #[test] fn temporal_returns_false_for_non_temporal_errors() { + serai_env::init_logger(); assert!(!IntakeCosignError::BeforeGlobalSessionStart.temporal()); assert!(!IntakeCosignError::AfterGlobalSessionEnd.temporal()); assert!(!IntakeCosignError::NonParticipatingNetwork.temporal()); @@ -139,6 +142,7 @@ mod spawn { #[tokio::test] async fn spawn_creates_cosigning_instance() { + serai_env::init_logger(); let db = MemDb::new(); let (_shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -149,6 +153,7 @@ mod spawn { #[tokio::test] async fn spawn_with_tasks_to_run_upon_cosigning() { + serai_env::init_logger(); let db = MemDb::new(); let (_shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -163,6 +168,7 @@ mod spawn { #[tokio::test] async fn spawn_initializes_cosigning_instance_correctly() { + serai_env::init_logger(); let db = MemDb::new(); let (_shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -178,6 +184,7 @@ mod spawn { #[tokio::test] async fn spawn_tasks_chain_correctly() { + serai_env::init_logger(); let db = MemDb::new(); let (_shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -192,6 +199,7 @@ mod spawn { #[tokio::test] async fn spawn_end_to_end() { + serai_env::init_logger(); let db = MemDb::new(); let (shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -230,12 +238,14 @@ mod latest_acknowledged_block { #[test] fn latest_acknowledged_block_defaults_to_zero() { + serai_env::init_logger(); let db = MemDb::new(); assert_eq!(Cosigning::::latest_acknowledged_block(&db).unwrap(), 0); } #[test] fn latest_acknowledged_block_errors_when_faulted() { + serai_env::init_logger(); let mut db = MemDb::new(); { let mut txn = db.txn(); @@ -247,6 +257,7 @@ mod latest_acknowledged_block { #[test] fn latest_acknowledged_block_returns_stored_value() { + serai_env::init_logger(); let mut db = MemDb::new(); { let mut txn = db.txn(); @@ -262,6 +273,7 @@ mod cosigned_block { #[test] fn cosigned_block_returns_none_beyond_latest() { + serai_env::init_logger(); let mut db = MemDb::new(); { let mut txn = db.txn(); @@ -273,6 +285,7 @@ mod cosigned_block { #[test] fn cosigned_block_returns_hash_when_in_range() { + serai_env::init_logger(); let mut db = MemDb::new(); let block_hash = BlockHash([9u8; 32]); { @@ -286,6 +299,7 @@ mod cosigned_block { #[test] fn cosigned_block_errors_when_faulted() { + serai_env::init_logger(); let mut db = MemDb::new(); { let mut txn = db.txn(); @@ -297,6 +311,7 @@ mod cosigned_block { #[tokio::test] async fn cosigning_cosigned_block_returns_correct_hash() { + serai_env::init_logger(); let mut db = MemDb::new(); let block_hash_5 = BlockHash([42u8; 32]); let block_hash_10 = BlockHash([43u8; 32]); @@ -328,6 +343,7 @@ mod notable_cosigns { #[test] fn notable_cosigns_empty_without_cosigns() { + serai_env::init_logger(); let db = MemDb::new(); let cosigns = Cosigning::::notable_cosigns(&db, [1u8; 32]); assert!(cosigns.is_empty()); @@ -335,6 +351,7 @@ mod notable_cosigns { #[test] fn notable_cosigns_returns_cosigns_for_session() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -369,6 +386,7 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -423,6 +441,7 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -452,6 +471,7 @@ mod cosigns_to_rebroadcast { #[test] fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -504,6 +524,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_not_yet_indexed_block() { + serai_env::init_logger(); let db = MemDb::new(); let (keypair, _) = random_keypair(&mut OsRng); @@ -521,6 +542,7 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_valid_cosign() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -545,6 +567,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_stale_cosign() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -583,6 +606,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_unrecognized_global_session() { + serai_env::init_logger(); let (keypair, _) = random_keypair(&mut OsRng); let mut db = MemDb::new(); @@ -611,6 +635,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_before_global_session_start() { + serai_env::init_logger(); let (mut session, keypair) = random_session(); session.start_block_number = 10; let id = session.id(); @@ -643,6 +668,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_after_global_session_end() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -675,6 +701,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_invalid_signature() { + serai_env::init_logger(); let (session, _keypair) = random_session(); let id = session.id(); // Use a different keypair than the one in the session @@ -701,6 +728,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_future_global_session() { + serai_env::init_logger(); let (mut session, keypair) = random_session(); session.start_block_number = 10; let id = session.id(); @@ -733,6 +761,7 @@ mod intake_cosign { #[test] fn intake_cosign_handles_faulty_cosign() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -771,6 +800,7 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -811,6 +841,7 @@ mod intake_cosign { #[test] fn intake_cosign_accepts_cosign_at_global_session_last_block() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -844,6 +875,7 @@ mod intake_cosign { #[test] fn intake_cosign_ignores_duplicate_fault_from_same_network() { + serai_env::init_logger(); let (session, keypair) = random_session(); let id = session.id(); @@ -896,6 +928,7 @@ mod intake_cosign { #[test] fn intake_cosign_rejects_non_participating_network() { + serai_env::init_logger(); let (session, _keypair) = random_session(); let id = session.id(); @@ -929,6 +962,7 @@ mod intake_cosign { #[test] fn intake_cosign_records_fault_below_threshold() { + serai_env::init_logger(); let network1 = ExternalNetworkId::Bitcoin; let network2 = ExternalNetworkId::Ethereum; let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; @@ -988,6 +1022,7 @@ mod intended_cosigns { #[test] fn intended_cosigns_empty_returns_empty() { + serai_env::init_logger(); let mut db = MemDb::new(); let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; let mut txn = db.txn(); @@ -997,6 +1032,7 @@ mod intended_cosigns { #[test] fn intended_cosigns_receives_sent_intent() { + serai_env::init_logger(); let mut db = MemDb::new(); let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index a0c9c6af1..39012c2ae 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -38,19 +38,23 @@ impl IntoTask for DelayTest { impl DelayTest { pub fn new() -> (Self, Instant) { let start = std::time::Instant::now(); - let _ = env_logger::try_init(); (Self::default(), start) } async fn assert_task_iteration_completes_with(&self, latest_acknowledged_block: u64) { - assert_eq!(LatestAcknowledgedBlock::get(&self.db), Some(latest_acknowledged_block)); - // Assert CosignedBlocks queue items have been consumed after task run - assert_eq!(CosignedBlocks::peek(&self.db), None); + use serai_env::log::debug; + let actual = LatestAcknowledgedBlock::get(&self.db); + let cosigned_pending = CosignedBlocks::peek(&self.db).is_some(); + debug!("LatestAcknowledgedBlock: {actual:?} (expected: Some({latest_acknowledged_block}))"); + debug!("CosignedBlocks pending: {cosigned_pending}"); + assert_eq!(actual, Some(latest_acknowledged_block)); + assert!(!cosigned_pending, "CosignedBlocks queue items should have been consumed"); } } #[tokio::test] async fn returns_false_with_no_messages() { + serai_env::init_logger(); let test = DelayTest::default(); let mut task = test.into_task(); @@ -80,7 +84,7 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(2).await; - serai_log::log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); + serai_env::log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); @@ -96,7 +100,7 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(5).await; - serai_log::log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); + serai_env::log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); { let mut txn = test.db.txn(); @@ -112,11 +116,12 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; test.assert_task_iteration_completes_with(8).await; - serai_log::log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); + serai_env::log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); } #[tokio::test] async fn does_not_regress_and_skips_if_not_a_later_block() { + serai_env::init_logger(); let mut test = DelayTest::default(); { @@ -157,6 +162,7 @@ async fn does_not_regress_and_skips_if_not_a_later_block() { #[tokio::test] async fn respects_acknowledgement_delay() { + serai_env::init_logger(); let mut test = DelayTest::default(); let block_number = OsRng.next_u64(); diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index fc9a6ce03..d2b5b9d8d 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -4,15 +4,16 @@ use std::{ time::{Duration, Instant}, }; +use rand_core::OsRng; use serai_cosign_types::SignedCosign; use serai_db::{Db as _, DbTxn, MemDb}; use serai_client_serai::abi::primitives::{ - BlockHash, crypto::Public, network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, }; +use serai_primitives::test_helpers::random_block_hash; use serai_task::ContinuallyRan; use crate::{ @@ -22,7 +23,7 @@ use crate::{ REQUEST_COSIGNS_SPACING, }, intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, - tests::{IntoTask, TaskTest, TestRequest}, + tests::{IntoTask, TaskTest, TestRequest, random_global_session}, }; pub(crate) struct EvaluatorTest { @@ -45,9 +46,8 @@ impl IntoTask for EvaluatorTest { } impl EvaluatorTest { - const GLOBAL_SESSION: [u8; 32] = [1u8; 32]; - fn init_global_session(&mut self, start_block_number: u64) -> [u8; 32] { + let global_session = random_global_session(); let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; let mut keys = HashMap::new(); @@ -60,10 +60,10 @@ impl EvaluatorTest { GlobalSession { start_block_number, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = self.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(Self::GLOBAL_SESSION, info)); + GlobalSessionsChannel::send(&mut txn, &(global_session, info)); txn.commit(); - Self::GLOBAL_SESSION + global_session } } @@ -72,11 +72,28 @@ impl EvaluatorTest { /// After a successful task run, all input channels should be consumed and the /// `CosignedBlocks` output channel should contain exactly the expected block range. fn verify_db_invariants(db: &mut MemDb, expected_cosigned_range: Option<(u64, u64)>) { + use serai_env::log::debug; + + let latest_evaluated = LatestEvaluatedBlock::get(db); + let current_session = CurrentlyEvaluatedGlobalSession::get(db); + let block_events_pending = BlockEvents::peek(db).is_some(); + let sessions_pending = GlobalSessionsChannel::peek(db).is_some(); + let cosigned_pending = CosignedBlocks::peek(db).is_some(); + + debug!("LatestEvaluatedBlock: {latest_evaluated:?}"); + debug!( + "CurrentlyEvaluatedGlobalSession: {:?}", + current_session.as_ref().map(|(id, gs)| (hex::encode(id), gs.start_block_number)) + ); + debug!("BlockEvents pending: {block_events_pending}"); + debug!("GlobalSessionsChannel pending: {sessions_pending}"); + debug!("CosignedBlocks pending: {cosigned_pending}"); + // All input channels should be fully consumed - assert!(BlockEvents::peek(db).is_none(), "BlockEvents should be fully consumed"); - assert!(GlobalSessionsChannel::peek(db).is_none(), "GlobalSessionsChannel should be consumed"); + assert!(!block_events_pending, "BlockEvents should be fully consumed"); + assert!(!sessions_pending, "GlobalSessionsChannel should be consumed"); - let has_session = CurrentlyEvaluatedGlobalSession::get(db).is_some(); + let has_session = current_session.is_some(); let mut txn = db.txn(); @@ -88,6 +105,7 @@ fn verify_db_invariants(db: &mut MemDb, expected_cosigned_range: Option<(u64, u6 for expected_block in start ..= end { let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); + debug!("CosignedBlock: block_number={block_number}"); assert_eq!(block_number, expected_block, "cosigned block mismatch"); } assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "unexpected extra cosigned block"); @@ -106,7 +124,12 @@ fn signed_cosign( block_number: u64, ) -> SignedCosign { SignedCosign { - cosign: Cosign { global_session, block_number, block_hash: BlockHash([0u8; 32]), cosigner }, + cosign: Cosign { + global_session, + block_number, + block_hash: random_block_hash(&mut OsRng), + cosigner, + }, signature: [0u8; 64], } } @@ -121,6 +144,7 @@ async fn returns_false_with_no_block_events() { #[tokio::test] async fn processes_blocks_with_no_events() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -145,6 +169,7 @@ async fn processes_blocks_with_no_events() { #[tokio::test] async fn processes_notable_events_when_cosigned() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -170,6 +195,7 @@ async fn processes_notable_events_when_cosigned() { #[tokio::test] async fn non_notable_uses_cached_known_cosign() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -203,6 +229,7 @@ async fn non_notable_uses_cached_known_cosign() { #[tokio::test] async fn non_notable_with_cosign_returns_some() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -228,6 +255,7 @@ async fn non_notable_with_cosign_returns_some() { #[tokio::test] async fn non_notable_computes_lowest_common_block() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = { @@ -247,10 +275,11 @@ async fn non_notable_computes_lowest_common_block() { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: 100u64 }; let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); - EvaluatorTest::GLOBAL_SESSION + id }; { @@ -289,6 +318,7 @@ async fn non_notable_computes_lowest_common_block() { #[tokio::test] async fn advances_global_session_at_start_block() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let session1 = [1u8; 32]; @@ -350,6 +380,7 @@ mod errors { #[tokio::test] async fn notable_events_without_cosign() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -391,6 +422,7 @@ mod errors { #[tokio::test] async fn notable_events_without_stakes() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = { @@ -405,10 +437,11 @@ mod errors { GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); - EvaluatorTest::GLOBAL_SESSION + id }; { @@ -432,6 +465,7 @@ mod errors { #[tokio::test] async fn non_notable_events_without_cosign() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -470,6 +504,7 @@ mod errors { #[tokio::test] async fn non_notable_events_without_stakes() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = { @@ -484,10 +519,11 @@ mod errors { GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); - EvaluatorTest::GLOBAL_SESSION + id }; { @@ -511,6 +547,7 @@ mod errors { #[tokio::test] async fn non_notable_cosign_too_low_does_not_add_weight() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = test.init_global_session(0); @@ -535,6 +572,7 @@ mod errors { #[tokio::test] async fn request_notable_cosigns_failure() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -560,6 +598,7 @@ mod errors { #[tokio::test] async fn request_non_notable_cosigns_failure() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); test.init_global_session(0); @@ -586,6 +625,7 @@ mod errors { #[tokio::test] #[should_panic(expected = "candidate's start block number ")] async fn panics_when_session_starts_after_block() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); { @@ -601,7 +641,8 @@ mod errors { GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, info)); BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); txn.commit(); } @@ -615,6 +656,7 @@ mod errors { expected = "currently_evaluated_global_session_strict wasn't called incrementally" )] async fn panics_when_called_non_incrementally() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); { @@ -658,6 +700,7 @@ mod errors { #[tokio::test] #[should_panic(expected = "attempt to add with overflow")] async fn weight_overflow_notable() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = { @@ -677,10 +720,11 @@ mod errors { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); - EvaluatorTest::GLOBAL_SESSION + id }; { @@ -711,6 +755,7 @@ mod errors { #[tokio::test] #[should_panic(expected = "attempt to add with overflow")] async fn weight_overflow_non_notable() { + serai_env::init_logger(); let mut test = EvaluatorTest::default(); let global_session = { @@ -730,10 +775,11 @@ mod errors { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(EvaluatorTest::GLOBAL_SESSION, info)); + let id = random_global_session(); + GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); - EvaluatorTest::GLOBAL_SESSION + id }; { diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 49d3259ea..d1452c2a6 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -82,12 +82,15 @@ async fn setup_mock_test() -> (SeraiShimRpc, IntendTestStruct) { /// Verify all post-run DB invariants by replaying events from the Serai node. async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { use serai_client_serai::abi::validator_sets::Event as VsEvent; + use serai_env::log::debug; let num_blocks_u64 = u64::try_from(num_blocks).unwrap(); // ScanCosignFrom should point to the block after the last processed + let scan_from = ScanCosignFrom::get(db); + debug!("ScanCosignFrom: {scan_from:?}"); assert_eq!( - ScanCosignFrom::get(db), + scan_from, Some(num_blocks_u64), "ScanCosignFrom should be {num_blocks} after processing blocks 0..={n}", n = num_blocks - 1 @@ -139,6 +142,7 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { // Verify Stakes match the expected. for (&(network, validator), &expected_amount) in &expected_stakes { let db_stake = Stakes::get(db, network, validator); + debug!("Stakes[{network:?}, {validator:?}]: db={db_stake:?}, expected={expected_amount}"); assert_eq!( db_stake, Some(Amount(expected_amount)), @@ -149,6 +153,7 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { // Verify LatestSet matches the expected. for (&network, &(session, stake)) in &expected_latest_set { let latest = LatestSet::get(db, network); + debug!("LatestSet[{network:?}]: db={latest:?}, expected=(session={session:?}, stake={stake})"); assert!(latest.is_some(), "LatestSet should exist for {network:?}"); let latest = latest.unwrap(); assert_eq!(latest.session, session, "LatestSet session mismatch for {network:?}"); @@ -160,24 +165,40 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { let session_num = next_session.get(&network).copied().unwrap_or(0); if session_num > 0 { let last_set = ExternalValidatorSet { network, session: Session(session_num - 1) }; + let validators = Validators::get(db, last_set); + debug!("Validators[{last_set:?}]: {validators:?} (should be None)"); assert_eq!( - Validators::get(db, last_set), - None, + validators, None, "Validators for {last_set:?} should have been consumed by SetKeys" ); } } + // Log and verify LatestGlobalSessionIntended + let latest_session_id = LatestGlobalSessionIntended::get(db); + debug!("LatestGlobalSessionIntended: {:?}", latest_session_id.map(hex::encode)); + // If any SetKeys happened, a GlobalSession should exist with consistent total_stake if set_keys_count > 0 { - let session_id = LatestGlobalSessionIntended::get(db); assert!( - session_id.is_some(), + latest_session_id.is_some(), "LatestGlobalSessionIntended should exist after {set_keys_count} SetKeys events", ); - let session = GlobalSessions::get(db, session_id.unwrap()); + let session_id = latest_session_id.unwrap(); + let session = GlobalSessions::get(db, session_id); assert!(session.is_some(), "GlobalSession should exist"); let session = session.unwrap(); + + debug!( + "GlobalSession {}: start_block_number={}, total_stake={}, sets={:?}, stakes={:?}", + &hex::encode(session_id)[.. 16], + session.start_block_number, + session.total_stake, + session.sets, + session.stakes, + ); + debug!("last_block: {:?}", GlobalSessionsLastBlock::get(db, session_id)); + let sum: u64 = session.stakes.values().sum(); assert_eq!( session.total_stake, sum, @@ -185,7 +206,18 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { ); } - serai_log::log::info!( + // SubstrateBlockHash index + let max_block = scan_from.unwrap_or(0); + if max_block > 0 { + debug!("SubstrateBlockHash index ({max_block} blocks):"); + for b in 0 .. max_block { + if let Some(hash) = SubstrateBlockHash::get(db, b) { + debug!(" #{b}: {}…", &hex::encode(hash.0)[.. 16]); + } + } + } + + debug!( "DB invariants verified: {} blocks, {} stake entries, {} LatestSets, {} SetKeys events", num_blocks, expected_stakes.len(), @@ -199,6 +231,7 @@ mod errors { #[tokio::test] async fn errors_if_chain_is_not_linear() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -222,6 +255,7 @@ mod errors { #[tokio::test] async fn errors_if_block_not_found() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -247,6 +281,7 @@ mod errors { #[tokio::test] async fn handles_rpc_error_on_block_fetch() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -268,6 +303,7 @@ mod errors { #[tokio::test] async fn handles_rpc_error_on_events_fetch() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -289,6 +325,7 @@ mod errors { #[tokio::test] async fn errors_if_set_decided_has_empty_validators() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -312,6 +349,7 @@ mod errors { #[tokio::test] async fn handles_rpc_error_on_latest_finalized() { + serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; serai.make_block(0, vec![]).await; @@ -321,7 +359,7 @@ mod errors { let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching latest finalized").await; - // No blocks processed — error happened before scanning + // No blocks processed, error happened before scanning assert_eq!(ScanCosignFrom::get(&task_test.db), None); serai.clear_error("blockchain/latest_finalized_block_number").await; @@ -334,8 +372,6 @@ mod errors { /// Random event, state, and block generator. pub(super) struct EventFuzzer { - /// Seed bytes. - pub(super) seed: [u8; 32], /// Available validator addresses. pub(super) validators: Vec, /// All networks. @@ -369,7 +405,6 @@ impl EventFuzzer { let networks: Vec = NetworkId::all().collect(); Self { - seed, validators, networks, stakes: HashMap::new(), @@ -385,9 +420,7 @@ impl EventFuzzer { &slice[usize::try_from(i).unwrap()] } - /// Generate a random amount using a weighted distribution: - /// ~25% tiny (1..=10), ~35% small (11..=1_000), ~25% medium (1_001..=100_000), - /// ~15% large (100_001..=10_000_000). + /// Generate a random amount using a weighted distribution fn random_amount(&mut self) -> u64 { match OsRng.next_u64() % 20 { 0 ..= 4 => (OsRng.next_u64() % 10) + 1, @@ -410,7 +443,7 @@ impl EventFuzzer { /// Generate a random deallocation event. Returns `None` if no validator has stake. fn random_deallocation(&mut self) -> Option { - // ~25% chance of generating a Serai deallocation (exercises the `continue` branch) + // ~25% chance of generating a Serai deallocation if OsRng.next_u64() % 4 == 0 { let validator = *self.pick(&self.validators.clone()); let amount = self.random_amount(); @@ -478,7 +511,7 @@ impl EventFuzzer { let keys: Vec = self.pending_keys.keys().copied().collect(); let i = usize::try_from(OsRng.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); let set = keys[i]; - // Remove from pending — the task will Validators::take it + // Remove from pending - the task will Validators::take it self.pending_keys.remove(&set); // Advance session for this network so the next SetDecided gets session+1 @@ -579,19 +612,14 @@ impl EventFuzzer { #[tokio::test] async fn fuzzed_event_processing() { - let _ = env_logger::try_init(); + serai_env::init_logger(); let num_blocks = 1000; let mut fuzzer = EventFuzzer::new(); let blocks = fuzzer.generate_blocks(num_blocks); - serai_log::log::info!( - "Fuzz test: {} blocks, {} validators, seed={:?}", - num_blocks, - fuzzer.validators.len(), - hex::encode(fuzzer.seed) - ); + serai_env::log::info!("Fuzz test: {} blocks, {} validators", num_blocks, fuzzer.validators.len(),); let (serai, task_test) = setup_mock_test().await; for (i, events) in blocks.into_iter().enumerate() { diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 37413c0c4..7d01f8756 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -13,13 +13,18 @@ mod cosigning; #[cfg(test)] mod full_stack; -use std::{ - sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, - }, +use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, }; +pub(crate) fn random_global_session() -> [u8; 32] { + use rand::RngCore; + let mut id = [0u8; 32]; + rand_core::OsRng.fill_bytes(&mut id); + id +} + use serai_shim_rpc::{SeraiShimRpc, ShimState}; use serai_client_serai::Serai; pub(crate) use serai_test_task::{IntoTask, TaskTest}; diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 6d6784934..3c62e743f 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -328,12 +328,7 @@ async fn handle_network( #[tokio::main] async fn main() { // Initialize the logger - env_logger::builder() - .filter_level( - log::LevelFilter::from_str(&serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) - .expect("`RUST_LOG` environment variable had an invalid filter"), - ) - .init(); + serai_env::init_logger(); log::info!("starting coordinator service..."); // Read the Serai key from the env diff --git a/message-queue/Cargo.toml b/message-queue/Cargo.toml index bb28e85db..8585f2950 100644 --- a/message-queue/Cargo.toml +++ b/message-queue/Cargo.toml @@ -33,7 +33,6 @@ schnorr-signatures = { path = "../crypto/schnorr", default-features = false, fea # Application log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } # Uses a single threaded runtime since this shouldn't ever be CPU-bound tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs index 6cc40fb7d..5185e3485 100644 --- a/message-queue/src/main.rs +++ b/message-queue/src/main.rs @@ -156,12 +156,7 @@ pub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: SchnorrSigna #[tokio::main(flavor = "current_thread")] async fn main() { - env_logger::builder() - .filter_level( - log::LevelFilter::from_str(&serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) - .expect("`RUST_LOG` environment variable had an invalid filter"), - ) - .init(); + serai_env::init_logger(); log::info!("Starting message-queue service..."); // Open the DB diff --git a/networks/ethereum/relayer/Cargo.toml b/networks/ethereum/relayer/Cargo.toml index 89d8e99e7..51e61de6a 100644 --- a/networks/ethereum/relayer/Cargo.toml +++ b/networks/ethereum/relayer/Cargo.toml @@ -18,7 +18,6 @@ workspace = true [dependencies] log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } diff --git a/networks/ethereum/relayer/src/main.rs b/networks/ethereum/relayer/src/main.rs index 1290ae089..7f7adbbc9 100644 --- a/networks/ethereum/relayer/src/main.rs +++ b/networks/ethereum/relayer/src/main.rs @@ -9,12 +9,7 @@ use serai_db::{Get as _, DbTxn as _, Db as _}; #[tokio::main(flavor = "current_thread")] async fn main() { - env_logger::builder() - .filter_level( - log::LevelFilter::from_str(&serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) - .expect("`RUST_LOG` environment variable had an invalid filter"), - ) - .init(); + serai_env::init_logger(); log::info!("Starting Ethereum relayer server..."); // Open the DB diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml index b91a9973b..30b5c6db8 100644 --- a/processor/bin/Cargo.toml +++ b/processor/bin/Cargo.toml @@ -29,7 +29,6 @@ serai-primitives = { path = "../../substrate/primitives", default-features = fal serai-cosign = { package = "serai-cosign-types", path = "../../coordinator/cosign/types" } log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } serai-env = { path = "../../common/env" } diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 1a4289173..d86dd35c4 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -47,12 +47,7 @@ pub type Db = serai_db::RocksDB; /// /// Yields the database. pub fn init() -> Db { - env_logger::builder() - .filter_level( - log::LevelFilter::from_str(&serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) - .expect("`RUST_LOG` environment variable had an invalid filter"), - ) - .init(); + serai_env::init_logger(); log::info!("Starting processor service..."); #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] diff --git a/tests/task/Cargo.toml b/tests/task/Cargo.toml index 0ce905cf5..fbf99b71f 100644 --- a/tests/task/Cargo.toml +++ b/tests/task/Cargo.toml @@ -18,4 +18,4 @@ workspace = true [dependencies] serai-task = { path = "../../common/task" } -serai-log = { path = "../../common/log", version = "0.1.0" } +serai-env = { path = "../../common/env", version = "0.1.0" } diff --git a/tests/task/src/lib.rs b/tests/task/src/lib.rs index 38b19f896..879bcf6c1 100644 --- a/tests/task/src/lib.rs +++ b/tests/task/src/lib.rs @@ -13,13 +13,13 @@ impl TaskTest { task: &mut T, made_progress: bool, ) { - serai_log::log::debug!("running task once: {}", core::any::type_name::()); + serai_env::log::debug!("running task once: {}", core::any::type_name::()); assert_eq!(task.run_iteration().await.unwrap(), made_progress); } /// Assert that a task iteration fails with an error containing the given string. pub async fn task_runs_and_fails_with(task: &mut T, error: &str) { - serai_log::log::debug!("running task (expecting failure): {}", core::any::type_name::()); + serai_env::log::debug!("running task (expecting failure): {}", core::any::type_name::()); let err = task.run_iteration().await.unwrap_err(); let err_str = format!("{err:?}"); assert!(err_str.contains(error), "{err_str}"); From 3350b9be05cfb4d5ff1a1137a40d5dde99ab5b59 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 16 Mar 2026 16:58:08 -0300 Subject: [PATCH 35/71] feat(tests/shim-rpc): add random failure rate --- tests/shim-rpc/Cargo.toml | 1 + tests/shim-rpc/src/lib.rs | 12 ++++++++++++ tests/shim-rpc/src/rpc.rs | 26 ++++++++++++++++++++++++++ tests/shim-rpc/src/state.rs | 17 +++++++++++++++++ 4 files changed, 56 insertions(+) diff --git a/tests/shim-rpc/Cargo.toml b/tests/shim-rpc/Cargo.toml index 871605015..613cb6971 100644 --- a/tests/shim-rpc/Cargo.toml +++ b/tests/shim-rpc/Cargo.toml @@ -22,6 +22,7 @@ tokio = { version = "1", default-features = false } serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] } +rand_core = { version = "0.6", default-features = false } borsh = { version = "1", default-features = false, features = ["std", "derive"] } hex = { version = "0.4", default-features = false, features = ["std"] } blake2 = { version = "0.11.0-rc.0", default-features = false } diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index 82f803f05..c4029a58b 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -139,6 +139,18 @@ impl SeraiShimRpc { &self.state } + /// Set the probability (0–100) that any RPC request randomly fails. + /// + /// 0 disables fuzzing (the default), 100 fails every request. + pub async fn set_failure_rate(&self, percent: u8) { + self.state.write().await.errors.failure_rate = percent; + } + + /// Disable random request failures. + pub async fn clear_failure_rate(&self) { + self.state.write().await.errors.failure_rate = 0; + } + /// Stop the shim RPC node server. pub fn stop(&self) { self.handle.stop().expect("failed to stop shim RPC node"); diff --git a/tests/shim-rpc/src/rpc.rs b/tests/shim-rpc/src/rpc.rs index 54913f80a..8ad83f2ed 100644 --- a/tests/shim-rpc/src/rpc.rs +++ b/tests/shim-rpc/src/rpc.rs @@ -122,6 +122,11 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er "blockchain/latest_finalized_block_number", async |_params, state, _ext| { let state = state.read().await; + if let Some(err) = + state.errors.check_random_failure("blockchain/latest_finalized_block_number") + { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("blockchain/latest_finalized_block_number") { return Err(Error::Internal(err.to_owned())); } @@ -133,6 +138,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("blockchain/is_finalized", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("blockchain/is_finalized") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("blockchain/is_finalized") { return Err(Error::Internal(err.to_owned())); } @@ -149,6 +157,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("blockchain/block", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("blockchain/block") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("blockchain/block") { return Err(Error::Internal(err.to_owned())); } @@ -177,6 +188,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("blockchain/events", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("blockchain/events") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("blockchain/events") { return Err(Error::Internal(err.to_owned())); } @@ -204,6 +218,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("validator-sets/current_session", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("validator-sets/current_session") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("validator-sets/current_session") { return Err(Error::Internal(err.to_owned())); } @@ -219,6 +236,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("validator-sets/current_stake", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("validator-sets/current_stake") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("validator-sets/current_stake") { return Err(Error::Internal(err.to_owned())); } @@ -234,6 +254,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("validator-sets/keys", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("validator-sets/keys") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("validator-sets/keys") { return Err(Error::Internal(err.to_owned())); } @@ -249,6 +272,9 @@ pub fn build_rpc_module(state: SharedState) -> Result, Er module .register_async_method("validator-sets/current_validators", async |params, state, _ext| { let state = state.read().await; + if let Some(err) = state.errors.check_random_failure("validator-sets/current_validators") { + return Err(Error::Internal(err)); + } if let Some(err) = state.errors.check_method("validator-sets/current_validators") { return Err(Error::Internal(err.to_owned())); } diff --git a/tests/shim-rpc/src/state.rs b/tests/shim-rpc/src/state.rs index 8c760794a..51d19f622 100644 --- a/tests/shim-rpc/src/state.rs +++ b/tests/shim-rpc/src/state.rs @@ -39,9 +39,26 @@ pub struct ErrorInjection { pub block_number_errors: HashMap<(String, u64), String>, /// Fails for a specific block hash. pub block_hash_errors: HashMap<(String, BlockHash), String>, + /// Probability (0–100) that any request randomly fails. 0 = never, 100 = always. + pub failure_rate: u8, } impl ErrorInjection { + /// Check if this request should randomly fail based on the configured `failure_rate`. + pub fn check_random_failure(&self, method: &str) -> Option { + if self.failure_rate == 0 { + return None; + } + use rand_core::RngCore; + let val = rand_core::OsRng.next_u32() % 100; + #[expect(clippy::as_conversions)] + if val < self.failure_rate as u32 { + Some(format!("fuzz: random failure on `{method}` (rate={}%)", self.failure_rate)) + } else { + None + } + } + /// Check if an error should be injected for this method call. pub fn check_method(&self, method: &str) -> Option<&String> { self.method_errors.get(method) From 75c5ade4c6d4ac7cd8f729d9f9df67105fbd2fc7 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 17 Mar 2026 17:19:02 -0300 Subject: [PATCH 36/71] feat(coordinator/cosign): more touch ups & fix evaluate -> delay flow for no events --- common/env/src/lib.rs | 6 +- coordinator/cosign/src/delay.rs | 55 +- coordinator/cosign/src/evaluator.rs | 34 +- coordinator/cosign/src/lib.rs | 47 +- coordinator/cosign/src/tests/cosigning.rs | 1067 ++++++++++----------- coordinator/cosign/src/tests/delay.rs | 50 +- coordinator/cosign/src/tests/evaluator.rs | 55 +- coordinator/cosign/src/tests/mod.rs | 36 +- coordinator/src/main.rs | 2 +- coordinator/src/tributary.rs | 2 +- coordinator/substrate/src/canonical.rs | 2 +- coordinator/substrate/src/ephemeral.rs | 2 +- tests/shim-rpc/src/lib.rs | 5 +- tests/shim-rpc/src/state.rs | 14 +- 14 files changed, 647 insertions(+), 730 deletions(-) diff --git a/common/env/src/lib.rs b/common/env/src/lib.rs index bdb5bba6a..3f17a1180 100644 --- a/common/env/src/lib.rs +++ b/common/env/src/lib.rs @@ -14,11 +14,7 @@ pub fn var(variable: &str) -> Option { } pub fn init_logger() { - env_logger::builder() - .filter_level( - log::LevelFilter::from_str(&var("RUST_LOG").unwrap_or_else(|| "info".to_owned())) - .expect("`RUST_LOG` environment variable had an invalid filter"), - ) + env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")) .try_init() .ok(); } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 33d3521d5..4073c74ec 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -4,7 +4,7 @@ use std::time::{Duration, SystemTime}; use serai_db::*; use serai_task::{DoesNotError, ContinuallyRan}; -use crate::evaluator::{CosignedBlocks, LatestEvaluatedBlock}; +use crate::evaluator::CosignedBlocks; #[cfg(not(any(test)))] /// How often callers should broadcast the cosigns flagged for rebroadcasting. @@ -27,8 +27,9 @@ pub(crate) fn now_timestamp() -> Duration { create_db!( SubstrateCosignDelay { - // The latest block number acknowledged by the delay task. - LatestAcknowledgedBlock: () -> u64, + // The latest block number finalized by the delay task. + // Finalized after a delay if it has events, simply marked as finalized if the block has no events. + LatestCosignedBlockNumber: () -> u64, } ); @@ -43,36 +44,43 @@ impl ContinuallyRan for CosignDelayTask { fn run_iteration(&mut self) -> impl Send + Future> { async move { let mut made_progress = false; - loop { - let mut txn = self.db.txn(); + let latest_finalized = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); - // Peek the next block to mark as cosigned, without consuming yet - let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { + let mut txn = self.db.txn(); + let Some((block_number, time_evaluated, has_events)) = CosignedBlocks::try_recv(&mut txn) + else { break; }; - let latest_acknowledged_block = LatestAcknowledgedBlock::get(&mut txn).unwrap_or(0); - serai_env::debug!( - "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, latest_acknowledged_block={latest_acknowledged_block}", + "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, \ + has_events={has_events}, latest_finalized={latest_finalized}", ); - if block_number <= latest_acknowledged_block { - // If we've already acknowledged a later block, consume and skip (don't sleep). + if block_number <= latest_finalized { + // Already finalized a later block, consume and skip without sleeping. txn.commit(); continue; } + // No events means no cosigns to wait for, finalize immediately + if !has_events { + LatestCosignedBlockNumber::set(&mut txn, &block_number); + txn.commit(); + serai_env::debug!("LatestFinalizedBlock={block_number} (no events, skipped delay)"); + made_progress = true; + continue; + } + // Calculate when we should mark it as valid let now_timestamp = now_timestamp().as_secs(); let time_valid_timestamp = time_evaluated + ACKNOWLEDGEMENT_DELAY.as_secs(); - // drop txn during sleep + // Drop txn during sleep drop(txn); if time_valid_timestamp > now_timestamp { - // Sleep until then let time_left = time_valid_timestamp - now_timestamp; serai_env::debug!("beginning sleep: {time_left}s"); tokio::time::sleep(Duration::from_secs(time_left)).await; @@ -81,28 +89,13 @@ impl ContinuallyRan for CosignDelayTask { let mut txn = self.db.txn(); // Consume block to continue CosignedBlocks::try_recv(&mut txn); - LatestAcknowledgedBlock::set(&mut txn, &block_number); + LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); - serai_env::debug!("LatestAcknowledgedBlock={block_number}"); - + serai_env::debug!("LatestFinalizedBlock={block_number}"); made_progress = true; } - // Catch up to HasEvents::No blocks that don't go through CosignedBlocks - // they only advance LatestEvaluatedBlock. These blocks need no sleep delay. - // since no cosign means no need for equivocation prevention - if let Some(evaluated) = LatestEvaluatedBlock::get(&self.db) { - let acknowledged = LatestAcknowledgedBlock::get(&self.db).unwrap_or(0); - if evaluated > acknowledged { - let mut txn = self.db.txn(); - LatestAcknowledgedBlock::set(&mut txn, &evaluated); - txn.commit(); - serai_env::debug!("LatestAcknowledgedBlock={evaluated} (caught up to evaluator)"); - made_progress = true; - } - } - Ok(made_progress) } } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 6e3d67e80..8f55be028 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -21,21 +21,19 @@ create_db!( SubstrateCosignEvaluator { // The global session currently being evaluated. CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession), - // The latest block number the evaluator has processed. - LatestEvaluatedBlock: () -> u64, } ); db_channel!( SubstrateCosignEvaluatorChannels { - // (cosigned block, time cosign was evaluated) - CosignedBlocks: () -> (u64, u64), + // (cosigned block, time cosign was evaluated, has_events) + CosignedBlocks: () -> (u64, u64, bool), } ); /// Commit a block as evaluated without sending it for cosign delay. -fn commit_evaluated_block(mut txn: impl DbTxn, block_number: u64) { - LatestEvaluatedBlock::set(&mut txn, &block_number); +fn commit_evaluated_block(mut txn: impl DbTxn, block_number: u64, has_events: bool) { + CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs(), has_events)); txn.commit(); } @@ -195,19 +193,29 @@ impl ContinuallyRan for CosignEvaluatorT break; }; + serai_env::log::debug!( + "beginning evaluator: block_number={block_number}, has_events={:#?}", + has_events + ); + // If no session is being evaluated yet, check if this block can be processed if currently_evaluated_global_session(&txn).is_none() { match GlobalSessionsChannel::peek(&txn) { // No global session declared yet: this block predates all sessions, skip it // this means only HasEvents:No blocks have been consumed so far None => { - commit_evaluated_block(txn, block_number); + serai_env::log::debug!("No global session declared yet"); + commit_evaluated_block(txn, block_number, false); made_progress = true; continue; } // Session queued but starts after this block, skip it Some(next) if next.1.start_block_number > block_number => { - commit_evaluated_block(txn, block_number); + serai_env::log::debug!( + "session {block_number} is queued for {}", + next.1.start_block_number + ); + commit_evaluated_block(txn, block_number, false); made_progress = true; continue; } @@ -216,11 +224,6 @@ impl ContinuallyRan for CosignEvaluatorT } } - serai_env::log::debug!( - "beginning evaluator: block_number={block_number}, has_events={:#?}", - has_events - ); - // Fetch the global session information let (global_session, global_session_info) = currently_evaluated_global_session_strict(&mut txn, block_number); @@ -300,15 +303,14 @@ impl ContinuallyRan for CosignEvaluatorT // If this block has no events necessitating cosigning, we can immediately consider the // block cosigned (making this block a NOP) HasEvents::No => { - commit_evaluated_block(txn, block_number); + commit_evaluated_block(txn, block_number, false); made_progress = true; continue; } } // Since we checked we had the necessary cosigns, send it for delay before acknowledgement - CosignedBlocks::send(&mut txn, &(block_number, now_timestamp().as_secs())); - commit_evaluated_block(txn, block_number); + commit_evaluated_block(txn, block_number, true); // Roughly ~1 hour, no need for repetitive logging #[cfg(not(test))] diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 5ed0f9ad9..79ea2994f 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -32,7 +32,7 @@ mod evaluator; /// The task to delay acknowledgement of the cosigns. mod delay; pub use delay::BROADCAST_FREQUENCY; -use delay::LatestAcknowledgedBlock; +use delay::LatestCosignedBlockNumber; #[cfg(test)] /// Test helpers and fixtures. @@ -204,7 +204,7 @@ impl Cosigning { db: D, serai: Arc, request: R, - tasks_to_run_upon_cosigning: Vec, + tasks_to_run_upon_finalizing_blocks: Vec, ) -> Self { let (intend_task, intend_task_handle) = Task::new(); let (evaluator_task, evaluator_task_handle) = Task::new(); @@ -223,18 +223,18 @@ impl Cosigning { ); tokio::spawn( (delay::CosignDelayTask { db: db.clone() }) - .continually_run(delay_task, tasks_to_run_upon_cosigning), + .continually_run(delay_task, tasks_to_run_upon_finalizing_blocks), ); Self { db, _task_handles: vec![intend_task_handle, evaluator_task_handle, delay_task_handle] } } /// The latest acknowledged block number. - pub fn latest_acknowledged_block(getter: &impl Get) -> Result { + pub fn latest_finalized_block(getter: &impl Get) -> Result { if FaultedSession::get(getter).is_some() { Err(Faulted)?; } - Ok(LatestAcknowledgedBlock::get(getter).unwrap_or(0)) + Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) } /// Fetch a cosigned Substrate block's hash by its block number. @@ -242,7 +242,7 @@ impl Cosigning { getter: &impl Get, block_number: u64, ) -> Result, Faulted> { - if block_number == 0 || block_number > Self::latest_acknowledged_block(getter)? { + if block_number == 0 || block_number > Self::latest_finalized_block(getter)? { return Ok(None); } @@ -268,32 +268,24 @@ impl Cosigning { /// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds. /// - /// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty - /// cosigns, in case of a fault, to induce identification of the fault by others. + /// This will be the most recent cosigns in case the initial broadcast failed. + /// Or, the faulty cosigns in case of a fault. To induce identification of the fault by others. pub fn cosigns_to_rebroadcast(&self) -> Vec { if let Some(faulted) = FaultedSession::get(&self.db) { let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults"); // Also include all of our recognized-as-honest cosigns in an attempt to induce fault // identification in those who see the faulty cosigns as honest - for network in ExternalNetworkId::all() { - if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) { - if cosign.cosign.global_session == faulted { - cosigns.push(cosign); - } - } - } + cosigns.extend( + Self::notable_cosigns(&self.db, faulted) + .into_iter() + .filter(|c| c.cosign.global_session == faulted), + ); cosigns } else { let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else { return vec![]; }; - let mut cosigns = vec![]; - for network in ExternalNetworkId::all() { - if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) { - cosigns.push(cosign); - } - } - cosigns + Self::notable_cosigns(&self.db, global_session) } } @@ -305,10 +297,10 @@ impl Cosigning { let network = cosign.cosigner; // Check our indexed blockchain includes a block with this block number - let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else { + let Some(indexed_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else { Err(IntakeCosignError::NotYetIndexedBlock)? }; - let faulty = cosign.block_hash != our_block_hash; + let faulty = cosign.block_hash != indexed_block_hash; // Check this isn't a dated cosign within its global session (as it would be if rebroadcasted) if !faulty { @@ -357,12 +349,11 @@ impl Cosigning { if !faulty { // If this is for a future global session, we don't acknowledge this cosign at this time - let latest_evaluated_block = evaluator::LatestEvaluatedBlock::get(&txn).unwrap_or(0); + let latest_cosigned_block = delay::LatestCosignedBlockNumber::get(&txn).unwrap_or(0); // This global session starts the block *after* its declaration, so we want to check if the // block declaring it was evaluated - if (global_session.start_block_number - 1) > latest_evaluated_block { - drop(txn); - return Err(IntakeCosignError::FutureGlobalSession); + if (global_session.start_block_number - 1) > latest_cosigned_block { + Err(IntakeCosignError::FutureGlobalSession)?; } // This is safe as it's in-range and newer, as prior checked since it isn't faulty diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index ee5444b13..b6dfcdad4 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,15 +1,22 @@ -use std::{collections::HashMap, time::Duration}; +use std::{ + collections::HashMap, + time::Duration, + sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + }, +}; use borsh::{BorshDeserialize, BorshSerialize}; -use blake2::{Blake2s256, Digest}; - use rand_core::OsRng; +use rand::{Rng, RngCore}; + use serai_db::{Db as _, DbTxn, MemDb}; -use serai_primitives::test_helpers::random_keypair; +use serai_primitives::test_helpers::{random_block_hash, random_keypair}; use serai_cosign_types::tests::sign_cosign; -use serai_task::Task; +use serai_task::{Task, ContinuallyRan}; use serai_client_serai::abi::primitives::{ BlockHash, @@ -21,8 +28,14 @@ use serai_client_serai::abi::primitives::{ use crate::{ Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, - SubstrateBlockHash, delay::LatestAcknowledgedBlock, evaluator::CurrentlyEvaluatedGlobalSession, - intend::IntendedCosigns, tests::TestRequest, tests::setup_shim_serai, + SubstrateBlockHash, + delay::LatestCosignedBlockNumber, + evaluator::CurrentlyEvaluatedGlobalSession, + intend::IntendedCosigns, + tests::{ + TestRequest, default_test_validator_set, random_global_session, random_validator_set, + setup_shim_serai, + }, }; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] @@ -35,9 +48,7 @@ struct TestGlobalSession { } impl TestGlobalSession { fn id(&self) -> [u8; 32] { - let mut sets = self.sets.clone(); - sets.sort_by_key(|a| borsh::to_vec(a).unwrap()); - Blake2s256::digest(borsh::to_vec(&sets).unwrap()).into() + GlobalSession::id(self.sets.clone()) } fn to_global(&self) -> GlobalSession { @@ -51,68 +62,70 @@ impl TestGlobalSession { } } -fn random_session() -> (TestGlobalSession, schnorrkel::Keypair) { - let network = ExternalNetworkId::Bitcoin; - let set = ExternalValidatorSet { network, session: Session(0) }; +fn random_test_session() -> (TestGlobalSession, schnorrkel::Keypair) { + let set = default_test_validator_set(); + let network = set.network; let (keypair, public) = random_keypair(&mut OsRng); let mut keys = HashMap::new(); let mut stakes = HashMap::new(); + let total_stake = OsRng.gen_range(1u64 .. u64::MAX / 17); keys.insert(network, public); - stakes.insert(network, 100); - - let session = - TestGlobalSession { start_block_number: 1, sets: vec![set], keys, stakes, total_stake: 100 }; + stakes.insert(network, total_stake); + + let session = TestGlobalSession { + start_block_number: u64::from(set.session.0) + 1, + sets: vec![set], + keys, + stakes, + total_stake, + }; (session, keypair) } -fn seed_minimal_state(db: &mut MemDb, session: &TestGlobalSession) { +fn seed_minimal_state(db: &mut MemDb, random_test_session: &TestGlobalSession) { let mut txn = db.txn(); - let id = session.id(); + let id = random_test_session.id(); // Required by `Cosigning::intake_cosign`. - GlobalSessions::set(&mut txn, id, &session.to_global()); + GlobalSessions::set(&mut txn, id, &random_test_session.to_global()); // Required by `Cosigning::cosigns_to_rebroadcast` in the non-faulted case. - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, random_test_session.to_global())); // Required for `intake_cosign` to not classify a session as "future". - LatestAcknowledgedBlock::set(&mut txn, &0u64); + LatestCosignedBlockNumber::set(&mut txn, &0u64); txn.commit(); } #[test] -fn global_session_id_generation() { +fn fuzz_global_session_id() { serai_env::init_logger(); - let network1 = ExternalNetworkId::Bitcoin; - let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; - let set2 = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - - // Create two vectors with the same sets but in different order - let cosigners1 = vec![set1, set2]; - let cosigners2 = vec![set2, set1]; - - // Both should produce the same ID (order-independent) - let id1 = GlobalSession::id(cosigners1.clone()); - let id2 = GlobalSession::id(cosigners2.clone()); - assert_eq!(id1, id2, "IDs should be the same regardless of input order"); - - // Same input should always produce the same ID (deterministic) - let id3 = GlobalSession::id(cosigners1.clone()); - let id4 = GlobalSession::id(cosigners2.clone()); - assert_eq!(id1, id3, "same input should produce the same ID"); - assert_eq!(id2, id4, "same input should produce the same ID"); - - // Different sets should produce different IDs - let set3 = ExternalValidatorSet { network: network1, session: Session(1) }; // same network as set1, different session - assert_ne!( - GlobalSession::id(vec![set1]), - GlobalSession::id(vec![set3]), - "different validator sets should produce different IDs" - ); + for _ in 0 .. 100 { + let num_sets = OsRng.gen_range(1u8 ..= 3); + let sets: Vec<_> = (0 .. num_sets).map(|_| random_validator_set(&mut OsRng)).collect(); + + let id1 = GlobalSession::id(sets.clone()); + let id2 = GlobalSession::id(sets.clone()); + + // Determinism: same input always produces same ID + assert_eq!(id1, id2); + + // Order-independence: any permutation produces the same ID + let mut reversed = sets.clone(); + reversed.reverse(); + assert_eq!(id1, GlobalSession::id(reversed)); + + // Collision resistance: changing any set should change the ID + let mut altered = sets.clone(); + altered[0] = random_validator_set(&mut OsRng); + if altered != sets { + assert_ne!(id1, GlobalSession::id(altered)); + } + } } mod intake_cosign_error { @@ -120,7 +133,6 @@ mod intake_cosign_error { #[test] fn temporal_returns_true_for_temporal_errors() { - serai_env::init_logger(); assert!(IntakeCosignError::NotYetIndexedBlock.temporal()); assert!(IntakeCosignError::StaleCosign.temporal()); assert!(IntakeCosignError::UnrecognizedGlobalSession.temporal()); @@ -129,7 +141,6 @@ mod intake_cosign_error { #[test] fn temporal_returns_false_for_non_temporal_errors() { - serai_env::init_logger(); assert!(!IntakeCosignError::BeforeGlobalSessionStart.temporal()); assert!(!IntakeCosignError::AfterGlobalSessionEnd.temporal()); assert!(!IntakeCosignError::NonParticipatingNetwork.temporal()); @@ -137,237 +148,171 @@ mod intake_cosign_error { } } -mod spawn { - use super::*; - - #[tokio::test] - async fn spawn_creates_cosigning_instance() { - serai_env::init_logger(); - let db = MemDb::new(); - let (_shim_serai, serai) = setup_shim_serai().await; - let (request, _calls) = TestRequest::new(false); - let cosigning = Cosigning::spawn(db, serai, request, vec![]); - - assert!(cosigning.cosigns_to_rebroadcast().is_empty()); - } - - #[tokio::test] - async fn spawn_with_tasks_to_run_upon_cosigning() { - serai_env::init_logger(); - let db = MemDb::new(); - let (_shim_serai, serai) = setup_shim_serai().await; - let (request, _calls) = TestRequest::new(false); - - let (_task, task_handle) = Task::new(); - let tasks_to_run = vec![task_handle]; - - let cosigning = Cosigning::spawn(db.clone(), serai, request, tasks_to_run); - - assert!(cosigning.cosigns_to_rebroadcast().is_empty()); +// More cases are tested in ./full_stack.rs with fuzzing for different event type blocks +#[tokio::test] +async fn spawn_end_to_end() { + serai_env::init_logger(); + let db = MemDb::new(); + let (shim_serai, serai) = setup_shim_serai().await; + let (request, _calls) = TestRequest::new(false); + + /// Create a trivial task that logs and sets a flag when triggered whose handle is passed to the cosigning pipeline. + struct LogOnTrigger(Arc); + impl ContinuallyRan for LogOnTrigger { + type Error = std::convert::Infallible; + fn run_iteration( + &mut self, + ) -> impl Send + std::future::Future> { + async { + serai_env::info!("dependent task triggered by cosigning pipeline"); + self.0.store(true, Ordering::SeqCst); + Ok(false) + } + } } + let (dependent_task, dependent_handle) = Task::new(); + let triggered = Arc::new(AtomicBool::new(false)); + tokio::spawn(LogOnTrigger(triggered.clone()).continually_run(dependent_task, vec![])); - #[tokio::test] - async fn spawn_initializes_cosigning_instance_correctly() { - serai_env::init_logger(); - let db = MemDb::new(); - let (_shim_serai, serai) = setup_shim_serai().await; - let (request, _calls) = TestRequest::new(false); - - let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + // Spawn cosigning tasks with the dependent task handle + let cosigning = Cosigning::spawn(db.clone(), serai, request, vec![dependent_handle]); + // Just started: results are empty + { assert!(cosigning.cosigns_to_rebroadcast().is_empty()); - - let latest = Cosigning::::latest_acknowledged_block(&db); + let latest = Cosigning::::latest_finalized_block(&db); assert!(latest.is_ok()); assert_eq!(latest.unwrap(), 0); } - #[tokio::test] - async fn spawn_tasks_chain_correctly() { - serai_env::init_logger(); - let db = MemDb::new(); - let (_shim_serai, serai) = setup_shim_serai().await; - let (request, _calls) = TestRequest::new(false); - - let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); - - tokio::time::sleep(Duration::from_millis(10)).await; - - let latest = Cosigning::::latest_acknowledged_block(&db); - assert!(latest.is_ok()); - } - - #[tokio::test] - async fn spawn_end_to_end() { - serai_env::init_logger(); - let db = MemDb::new(); - let (shim_serai, serai) = setup_shim_serai().await; - let (request, _calls) = TestRequest::new(false); - - // Create block 0 so the intend task has something to scan - shim_serai.make_block(0, vec![]).await; - - // Spawn cosigning tasks in the background - let _cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); - - // Keep adding new blocks while the background tasks process them - let total_blocks = 10u64; - for _ in 1 ..= total_blocks { - shim_serai.add_block_with_events(vec![]).await; - tokio::time::sleep(Duration::from_millis(50)).await; - } - - // Wait for the full pipeline to process in a polling loop: - // intend (indexes blocks) -> evaluator (passes HasEvents::No through) -> delay (waits - // ACKNOWLEDGEMENT_DELAY) - loop { - let latest = Cosigning::::latest_acknowledged_block(&db); - if latest.map(|n| n >= total_blocks).unwrap_or(false) { - break; + // Run block production and pipeline polling concurrently + let total_blocks = 10; + tokio::join!( + // Produce blocks + async { + for _ in 0 ..= total_blocks { + shim_serai.add_block_with_events(vec![]).await; + tokio::time::sleep(Duration::from_millis(50)).await; + } + }, + // Poll until the pipeline has processed all blocks + async { + loop { + let latest = Cosigning::::latest_finalized_block(&db); + if latest.map(|n| n >= total_blocks).unwrap_or(false) { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; } - tokio::time::sleep(Duration::from_millis(100)).await; } + ); - let latest = Cosigning::::latest_acknowledged_block(&db).unwrap(); - assert!(latest >= total_blocks); - } + let latest = Cosigning::::latest_finalized_block(&db).unwrap(); + assert!(latest == total_blocks); + + // Verify the dependent task was triggered by the pipeline + assert!(triggered.load(Ordering::SeqCst)); } -mod latest_acknowledged_block { - use super::*; +#[test] +fn latest_finalized_block() { + serai_env::init_logger(); - #[test] - fn latest_acknowledged_block_defaults_to_zero() { - serai_env::init_logger(); + // Defaults to zero + { let db = MemDb::new(); - assert_eq!(Cosigning::::latest_acknowledged_block(&db).unwrap(), 0); + assert_eq!(Cosigning::::latest_finalized_block(&db).unwrap(), 0); } - #[test] - fn latest_acknowledged_block_errors_when_faulted() { - serai_env::init_logger(); + // Errors when faulted session exists + { let mut db = MemDb::new(); - { - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &[1u8; 32]); - txn.commit(); - } - assert!(matches!(Cosigning::::latest_acknowledged_block(&db), Err(Faulted))); + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); + txn.commit(); + assert!(matches!(Cosigning::::latest_finalized_block(&db), Err(Faulted))); } - #[test] - fn latest_acknowledged_block_returns_stored_value() { - serai_env::init_logger(); + // Returns stored value + { let mut db = MemDb::new(); - { - let mut txn = db.txn(); - LatestAcknowledgedBlock::set(&mut txn, &42u64); - txn.commit(); - } - assert_eq!(Cosigning::::latest_acknowledged_block(&db).unwrap(), 42); + let mut txn = db.txn(); + let latest_finalized_block = OsRng.next_u64(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + txn.commit(); + assert_eq!(Cosigning::::latest_finalized_block(&db).unwrap(), latest_finalized_block); } } -mod cosigned_block { - use super::*; - - #[test] - fn cosigned_block_returns_none_beyond_latest() { - serai_env::init_logger(); - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - LatestAcknowledgedBlock::set(&mut txn, &5u64); - txn.commit(); - } - assert_eq!(Cosigning::::cosigned_block(&db, 6).unwrap(), None); - } +#[test] +fn cosigned_block() { + serai_env::init_logger(); - #[test] - fn cosigned_block_returns_hash_when_in_range() { - serai_env::init_logger(); + // Returns None beyond latest finalized block + { let mut db = MemDb::new(); - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - LatestAcknowledgedBlock::set(&mut txn, &5u64); - SubstrateBlockHash::set(&mut txn, 3, &block_hash); - txn.commit(); - } - assert_eq!(Cosigning::::cosigned_block(&db, 3).unwrap(), Some(block_hash)); + assert_eq!(Cosigning::::cosigned_block(&db, 0).unwrap(), None); + let mut txn = db.txn(); + let latest_finalized_block = OsRng.next_u64(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + txn.commit(); + assert_eq!(Cosigning::::cosigned_block(&db, latest_finalized_block + 1).unwrap(), None); } - #[test] - fn cosigned_block_errors_when_faulted() { - serai_env::init_logger(); + // Returns hash when block is in range + { let mut db = MemDb::new(); - { - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &[1u8; 32]); - txn.commit(); - } - assert!(matches!(Cosigning::::cosigned_block(&db, 1), Err(Faulted))); + let block_hash = random_block_hash(&mut OsRng); + let latest_finalized_block = OsRng.next_u64(); + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + SubstrateBlockHash::set(&mut txn, latest_finalized_block - 1, &block_hash); + txn.commit(); + assert_eq!( + Cosigning::::cosigned_block(&db, latest_finalized_block - 1).unwrap(), + Some(block_hash) + ); } - #[tokio::test] - async fn cosigning_cosigned_block_returns_correct_hash() { - serai_env::init_logger(); + // Errors when faulted session exists + { let mut db = MemDb::new(); - let block_hash_5 = BlockHash([42u8; 32]); - let block_hash_10 = BlockHash([43u8; 32]); - - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 5, &block_hash_5); - SubstrateBlockHash::set(&mut txn, 10, &block_hash_10); - LatestAcknowledgedBlock::set(&mut txn, &10u64); - txn.commit(); - } - - let result = Cosigning::::cosigned_block(&db, 5); - assert!(result.is_ok()); - assert_eq!(result.unwrap(), Some(block_hash_5)); - - let result_10 = Cosigning::::cosigned_block(&db, 10); - assert!(result_10.is_ok()); - assert_eq!(result_10.unwrap(), Some(block_hash_10)); - - let result_11 = Cosigning::::cosigned_block(&db, 11); - assert!(result_11.is_ok()); - assert_eq!(result_11.unwrap(), None); + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); + txn.commit(); + assert!(matches!(Cosigning::::cosigned_block(&db, OsRng.next_u64()), Err(Faulted))); } } -mod notable_cosigns { - use super::*; +#[test] +fn notable_cosigns() { + serai_env::init_logger(); - #[test] - fn notable_cosigns_empty_without_cosigns() { - serai_env::init_logger(); + // Empty without cosigns + { let db = MemDb::new(); - let cosigns = Cosigning::::notable_cosigns(&db, [1u8; 32]); + let cosigns = Cosigning::::notable_cosigns(&db, random_global_session(&mut OsRng)); assert!(cosigns.is_empty()); } - #[test] - fn notable_cosigns_returns_cosigns_for_session() { - serai_env::init_logger(); - let (session, keypair) = random_session(); + // Returns cosigns for session + { + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &block_hash); txn.commit(); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let cosign = Cosign { global_session: id, block_number, block_hash, cosigner: network }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); @@ -377,87 +322,79 @@ mod notable_cosigns { assert_eq!(notable.len(), 1); assert_eq!(notable[0].cosign.block_number, block_number); assert_eq!(notable[0].cosign.block_hash, block_hash); - assert_eq!(notable[0].cosign.cosigner, ExternalNetworkId::Bitcoin); + assert_eq!(notable[0].cosign.cosigner, network); } } -mod cosigns_to_rebroadcast { - use super::*; +#[test] +fn cosigns_to_rebroadcast() { + serai_env::init_logger(); - #[test] - fn cosigns_to_rebroadcast_excludes_cosigns_from_different_global_session() { - serai_env::init_logger(); - let (session, keypair) = random_session(); + // Excludes cosigns from different global session + { + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); + let block_number = OsRng.next_u64(); + let our_hash = random_block_hash(&mut OsRng); + let faulty_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &our_hash); txn.commit(); } - let faulty_cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; + let faulty_cosign = + Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network }; let faulty_signed = sign_cosign(faulty_cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); cosigning.intake_cosign(&faulty_signed).unwrap(); - let different_session_id = [99u8; 32]; + let different_session_id = random_global_session(&mut OsRng); let different_cosign = Cosign { global_session: different_session_id, block_number, block_hash: our_hash, - cosigner: ExternalNetworkId::Bitcoin, + cosigner: network, }; let different_signed = sign_cosign(different_cosign, &keypair); { let mut txn = db.txn(); - NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &different_signed); + NetworksLatestCosignedBlock::set(&mut txn, id, network, &different_signed); txn.commit(); } let cosigning = Cosigning::new(db); let rebroadcast = cosigning.cosigns_to_rebroadcast(); - assert_eq!( - rebroadcast.len(), - 1, - "should only include faults, not cosigns from different sessions" - ); + assert_eq!(rebroadcast.len(), 1,); assert_eq!(rebroadcast[0].cosign.block_hash, faulty_hash); assert_eq!(rebroadcast[0].cosign.global_session, id); } - #[test] - fn cosigns_to_rebroadcast_returns_latest_cosigns_when_not_faulted() { - serai_env::init_logger(); - let (session, keypair) = random_session(); + // Returns latest cosigns when not faulted + { + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &block_hash); txn.commit(); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; + let cosign = Cosign { global_session: id, block_number, block_hash, cosigner: network }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); @@ -469,45 +406,37 @@ mod cosigns_to_rebroadcast { assert_eq!(rebroadcast[0].cosign.block_hash, block_hash); } - #[test] - fn cosigns_to_rebroadcast_returns_faults_and_honest_when_faulted() { - serai_env::init_logger(); - let (session, keypair) = random_session(); + // Returns faults and honest cosigns when faulted + { + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); + let block_number = OsRng.next_u64(); + let our_hash = random_block_hash(&mut OsRng); + let faulty_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &our_hash); txn.commit(); } - let faulty_cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; + let faulty_cosign = + Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network }; let faulty_signed = sign_cosign(faulty_cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); cosigning.intake_cosign(&faulty_signed).unwrap(); - let honest_cosign = Cosign { - global_session: id, - block_number, - block_hash: our_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; + let honest_cosign = + Cosign { global_session: id, block_number, block_hash: our_hash, cosigner: network }; let honest_signed = sign_cosign(honest_cosign, &keypair); { let mut txn = db.txn(); - NetworksLatestCosignedBlock::set(&mut txn, id, ExternalNetworkId::Bitcoin, &honest_signed); + NetworksLatestCosignedBlock::set(&mut txn, id, network, &honest_signed); txn.commit(); } @@ -522,271 +451,289 @@ mod cosigns_to_rebroadcast { mod intake_cosign { use super::*; - #[test] - fn intake_cosign_rejects_not_yet_indexed_block() { - serai_env::init_logger(); - let db = MemDb::new(); - let (keypair, _) = random_keypair(&mut OsRng); + mod errors { + use super::*; + + #[test] + fn rejects_not_yet_indexed_block() { + serai_env::init_logger(); + let db = MemDb::new(); + let (keypair, _) = random_keypair(&mut OsRng); + + let cosign = Cosign { + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::NotYetIndexedBlock) + )); + } - let cosign = Cosign { - global_session: [1u8; 32], - block_number: 1, - block_hash: BlockHash([9u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); + #[test] + fn rejects_stale_cosign() { + serai_env::init_logger(); + let (session, keypair) = random_test_session(); + let id = session.id(); + let network = session.sets[0].network; + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_hash_1 = random_block_hash(&mut OsRng); + let block_hash_2 = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, 1, &block_hash_1); + SubstrateBlockHash::set(&mut txn, 2, &block_hash_2); + txn.commit(); + } - let mut cosigning = Cosigning::new(db); - assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::NotYetIndexedBlock))); - } + let first_cosign = + Cosign { global_session: id, block_number: 2, block_hash: block_hash_2, cosigner: network }; + let first_signed = sign_cosign(first_cosign, &keypair); - #[test] - fn intake_cosign_accepts_valid_cosign() { - serai_env::init_logger(); - let (session, keypair) = random_session(); - let id = session.id(); + let mut cosigning = Cosigning::new(db.clone()); + cosigning.intake_cosign(&first_signed).unwrap(); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let stale_cosign = + Cosign { global_session: id, block_number: 1, block_hash: block_hash_1, cosigner: network }; + let stale_signed = sign_cosign(stale_cosign, &keypair); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + assert!(matches!( + cosigning.intake_cosign(&stale_signed), + Err(IntakeCosignError::StaleCosign) + )); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(cosigning.intake_cosign(&signed).is_ok()); - } - - #[test] - fn intake_cosign_rejects_stale_cosign() { - serai_env::init_logger(); - let (session, keypair) = random_session(); - let id = session.id(); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + #[test] + fn rejects_unrecognized_global_session() { + serai_env::init_logger(); + let (keypair, _) = random_keypair(&mut OsRng); + + let mut db = MemDb::new(); + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &block_hash); - SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); - txn.commit(); + let cosign = Cosign { + global_session: random_global_session(&mut OsRng), + block_number, + block_hash, + cosigner: ExternalNetworkId::Bitcoin, + }; + let signed = sign_cosign(cosign, &keypair); + + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::UnrecognizedGlobalSession) + )); } - let first_cosign = Cosign { - global_session: id, - block_number: 2, - block_hash: BlockHash([2u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let first_signed = sign_cosign(first_cosign, &keypair); + #[test] + fn rejects_before_global_session_start() { + serai_env::init_logger(); + let (mut session, keypair) = random_test_session(); + let network = session.sets[0].network; + session.start_block_number = 10; + let id = session.id(); + + let block_hash = random_block_hash(&mut OsRng); + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + LatestCosignedBlockNumber::set(&mut txn, &10u64); + SubstrateBlockHash::set(&mut txn, 5, &block_hash); + txn.commit(); + } - let mut cosigning = Cosigning::new(db.clone()); - cosigning.intake_cosign(&first_signed).unwrap(); + let cosign = Cosign { global_session: id, block_number: 5, block_hash, cosigner: network }; + let signed = sign_cosign(cosign, &keypair); - let stale_cosign = Cosign { - global_session: id, - block_number: 1, - block_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let stale_signed = sign_cosign(stale_cosign, &keypair); + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::BeforeGlobalSessionStart) + )); + } - assert!(matches!(cosigning.intake_cosign(&stale_signed), Err(IntakeCosignError::StaleCosign))); - } + #[test] + fn rejects_after_global_session_end() { + serai_env::init_logger(); + let (session, keypair) = random_test_session(); + let id = session.id(); + let network = session.sets[0].network; + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_hash = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + GlobalSessionsLastBlock::set(&mut txn, id, &5u64); + SubstrateBlockHash::set(&mut txn, 10, &block_hash); + txn.commit(); + } - #[test] - fn intake_cosign_rejects_unrecognized_global_session() { - serai_env::init_logger(); - let (keypair, _) = random_keypair(&mut OsRng); + let cosign = Cosign { global_session: id, block_number: 10, block_hash, cosigner: network }; + let signed = sign_cosign(cosign, &keypair); - let mut db = MemDb::new(); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::AfterGlobalSessionEnd) + )); } - let cosign = Cosign { - global_session: [99u8; 32], - block_number, - block_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::UnrecognizedGlobalSession) - )); - } - - #[test] - fn intake_cosign_rejects_before_global_session_start() { - serai_env::init_logger(); - let (mut session, keypair) = random_session(); - session.start_block_number = 10; - let id = session.id(); + #[test] + fn rejects_invalid_signature() { + serai_env::init_logger(); + let (session, _keypair) = random_test_session(); + let id = session.id(); + let network = session.sets[0].network; + let (wrong_keypair, _) = random_keypair(&mut OsRng); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestAcknowledgedBlock::set(&mut txn, &10u64); + let cosign = Cosign { global_session: id, block_number, block_hash, cosigner: network }; + let signed = sign_cosign(cosign, &wrong_keypair); - SubstrateBlockHash::set(&mut txn, 5, &BlockHash([5u8; 32])); - txn.commit(); + let mut cosigning = Cosigning::new(db); + assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::InvalidSignature))); } - let cosign = Cosign { - global_session: id, - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::BeforeGlobalSessionStart) - )); - } + #[test] + fn rejects_future_global_session() { + serai_env::init_logger(); + let (mut session, keypair) = random_test_session(); + let network = session.sets[0].network; + session.start_block_number = 10; + let id = session.id(); + + let block_hash = random_block_hash(&mut OsRng); + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + GlobalSessions::set(&mut txn, id, &session.to_global()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + LatestCosignedBlockNumber::set(&mut txn, &5u64); + SubstrateBlockHash::set(&mut txn, 10, &block_hash); + txn.commit(); + } - #[test] - fn intake_cosign_rejects_after_global_session_end() { - serai_env::init_logger(); - let (session, keypair) = random_session(); - let id = session.id(); + let cosign = Cosign { global_session: id, block_number: 10, block_hash, cosigner: network }; + let signed = sign_cosign(cosign, &keypair); - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::FutureGlobalSession) + )); + } - { - let mut txn = db.txn(); + #[test] + fn rejects_non_participating_network() { + serai_env::init_logger(); + let (session, _keypair) = random_test_session(); + let id = session.id(); + let session_network = session.sets[0].network; + + let non_participating = ExternalNetworkId::all().find(|n| *n != session_network).unwrap(); + let (other_keypair, _) = random_keypair(&mut OsRng); + + let mut db = MemDb::new(); + seed_minimal_state(&mut db, &session); + + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + txn.commit(); + } - GlobalSessionsLastBlock::set(&mut txn, id, &5u64); + let cosign = + Cosign { global_session: id, block_number, block_hash, cosigner: non_participating }; + let signed = sign_cosign(cosign, &other_keypair); - SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); - txn.commit(); + let mut cosigning = Cosigning::new(db); + assert!(matches!( + cosigning.intake_cosign(&signed), + Err(IntakeCosignError::NonParticipatingNetwork) + )); } - - let cosign = Cosign { - global_session: id, - block_number: 10, - block_hash: BlockHash([10u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; - let signed = sign_cosign(cosign, &keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::AfterGlobalSessionEnd) - )); } #[test] - fn intake_cosign_rejects_invalid_signature() { + fn accepts_valid_cosign() { serai_env::init_logger(); - let (session, _keypair) = random_session(); + let (session, keypair) = random_test_session(); let id = session.id(); - // Use a different keypair than the one in the session - let (wrong_keypair, _) = random_keypair(&mut OsRng); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &block_hash); txn.commit(); } - let cosign = - Cosign { global_session: id, block_number, block_hash, cosigner: ExternalNetworkId::Bitcoin }; - let signed = sign_cosign(cosign, &wrong_keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!(cosigning.intake_cosign(&signed), Err(IntakeCosignError::InvalidSignature))); - } - - #[test] - fn intake_cosign_rejects_future_global_session() { - serai_env::init_logger(); - let (mut session, keypair) = random_session(); - session.start_block_number = 10; - let id = session.id(); - - let mut db = MemDb::new(); - { - let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - - LatestAcknowledgedBlock::set(&mut txn, &5u64); - SubstrateBlockHash::set(&mut txn, 10, &BlockHash([10u8; 32])); - txn.commit(); - } - - let cosign = Cosign { - global_session: id, - block_number: 10, - block_hash: BlockHash([10u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; + let cosign = Cosign { global_session: id, block_number, block_hash, cosigner: network }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::FutureGlobalSession) - )); + assert!(cosigning.intake_cosign(&signed).is_ok()); } #[test] - fn intake_cosign_handles_faulty_cosign() { + fn handles_faulty_cosign() { serai_env::init_logger(); - let (session, keypair) = random_session(); + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); + let block_number = OsRng.next_u64(); + let our_hash = random_block_hash(&mut OsRng); + let faulty_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &our_hash); txn.commit(); } - let cosign = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash, - cosigner: ExternalNetworkId::Bitcoin, - }; + let cosign = + Cosign { global_session: id, block_number, block_hash: faulty_hash, cosigner: network }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); - assert!(cosigning.intake_cosign(&signed).is_ok()); let faults: Option> = Faults::get(&db, id); @@ -799,60 +746,60 @@ mod intake_cosign { } #[test] - fn intake_cosign_accepts_newer_cosign_when_existing_is_older() { + fn accepts_newer_cosign_when_existing_is_older() { serai_env::init_logger(); - let (session, keypair) = random_session(); + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); + let block_hash_1 = random_block_hash(&mut OsRng); + let block_hash_2 = random_block_hash(&mut OsRng); { let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &BlockHash([1u8; 32])); - SubstrateBlockHash::set(&mut txn, 2, &BlockHash([2u8; 32])); + SubstrateBlockHash::set(&mut txn, 1, &block_hash_1); + SubstrateBlockHash::set(&mut txn, 2, &block_hash_2); txn.commit(); } - let first_cosign = Cosign { - global_session: id, - block_number: 1, - block_hash: BlockHash([1u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; + let first_cosign = + Cosign { global_session: id, block_number: 1, block_hash: block_hash_1, cosigner: network }; let first_signed = sign_cosign(first_cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); cosigning.intake_cosign(&first_signed).unwrap(); - let newer_cosign = Cosign { - global_session: id, - block_number: 2, - block_hash: BlockHash([2u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, - }; + let newer_cosign = + Cosign { global_session: id, block_number: 2, block_hash: block_hash_2, cosigner: network }; let newer_signed = sign_cosign(newer_cosign, &keypair); assert!(cosigning.intake_cosign(&newer_signed).is_ok()); - let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); + let latest = NetworksLatestCosignedBlock::get(&db, id, network).unwrap(); assert_eq!(latest.cosign.block_number, 2); } #[test] - fn intake_cosign_accepts_cosign_at_global_session_last_block() { + fn accepts_cosign_at_global_session_last_block() { serai_env::init_logger(); - let (session, keypair) = random_session(); + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); + let last_block = 5u64; + let mut block_hashes = Vec::new(); { let mut txn = db.txn(); - GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - for i in 1 ..= 5 { - SubstrateBlockHash::set(&mut txn, i, &BlockHash([i as u8; 32])); + GlobalSessionsLastBlock::set(&mut txn, id, &last_block); + for i in 1 ..= last_block { + let hash = random_block_hash(&mut OsRng); + SubstrateBlockHash::set(&mut txn, i, &hash); + block_hashes.push(hash); } txn.commit(); } @@ -861,43 +808,40 @@ mod intake_cosign { let cosign = Cosign { global_session: id, - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, + block_number: last_block, + block_hash: block_hashes[last_block as usize - 1], + cosigner: network, }; let signed = sign_cosign(cosign, &keypair); assert!(cosigning.intake_cosign(&signed).is_ok()); - let latest = NetworksLatestCosignedBlock::get(&db, id, ExternalNetworkId::Bitcoin).unwrap(); - assert_eq!(latest.cosign.block_number, 5); + let latest = NetworksLatestCosignedBlock::get(&db, id, network).unwrap(); + assert_eq!(latest.cosign.block_number, last_block); } #[test] - fn intake_cosign_ignores_duplicate_fault_from_same_network() { + fn ignores_duplicate_fault_from_same_network() { serai_env::init_logger(); - let (session, keypair) = random_session(); + let (session, keypair) = random_test_session(); let id = session.id(); + let network = session.sets[0].network; let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash_1 = BlockHash([2u8; 32]); - let faulty_hash_2 = BlockHash([3u8; 32]); + let block_number = OsRng.next_u64(); + let our_hash = random_block_hash(&mut OsRng); + let faulty_hash_1 = random_block_hash(&mut OsRng); + let faulty_hash_2 = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &our_hash); txn.commit(); } - let faulty_cosign_1 = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash_1, - cosigner: ExternalNetworkId::Bitcoin, - }; + let faulty_cosign_1 = + Cosign { global_session: id, block_number, block_hash: faulty_hash_1, cosigner: network }; let faulty_signed_1 = sign_cosign(faulty_cosign_1, &keypair); let mut cosigning = Cosigning::new(db.clone()); @@ -907,12 +851,8 @@ mod intake_cosign { assert_eq!(faults_after_first.len(), 1); assert_eq!(faults_after_first[0].cosign.block_hash, faulty_hash_1); - let faulty_cosign_2 = Cosign { - global_session: id, - block_number, - block_hash: faulty_hash_2, - cosigner: ExternalNetworkId::Bitcoin, - }; + let faulty_cosign_2 = + Cosign { global_session: id, block_number, block_hash: faulty_hash_2, cosigner: network }; let faulty_signed_2 = sign_cosign(faulty_cosign_2, &keypair); assert!(cosigning.intake_cosign(&faulty_signed_2).is_ok()); @@ -927,41 +867,7 @@ mod intake_cosign { } #[test] - fn intake_cosign_rejects_non_participating_network() { - serai_env::init_logger(); - let (session, _keypair) = random_session(); - let id = session.id(); - - let (eth_keypair, _) = random_keypair(&mut OsRng); - - let mut db = MemDb::new(); - seed_minimal_state(&mut db, &session); - - let block_number = 1; - let block_hash = BlockHash([9u8; 32]); - { - let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, block_number, &block_hash); - txn.commit(); - } - - let cosign = Cosign { - global_session: id, - block_number, - block_hash, - cosigner: ExternalNetworkId::Ethereum, - }; - let signed = sign_cosign(cosign, ð_keypair); - - let mut cosigning = Cosigning::new(db); - assert!(matches!( - cosigning.intake_cosign(&signed), - Err(IntakeCosignError::NonParticipatingNetwork) - )); - } - - #[test] - fn intake_cosign_records_fault_below_threshold() { + fn records_fault_below_threshold() { serai_env::init_logger(); let network1 = ExternalNetworkId::Bitcoin; let network2 = ExternalNetworkId::Ethereum; @@ -992,9 +898,9 @@ mod intake_cosign { let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let block_number = 1; - let our_hash = BlockHash([1u8; 32]); - let faulty_hash = BlockHash([2u8; 32]); + let block_number = OsRng.next_u64(); + let our_hash = random_block_hash(&mut OsRng); + let faulty_hash = random_block_hash(&mut OsRng); { let mut txn = db.txn(); SubstrateBlockHash::set(&mut txn, block_number, &our_hash); @@ -1017,29 +923,28 @@ mod intake_cosign { } } -mod intended_cosigns { - use super::*; +#[test] +fn intended_cosigns() { + serai_env::init_logger(); - #[test] - fn intended_cosigns_empty_returns_empty() { - serai_env::init_logger(); + // Empty returns empty + { let mut db = MemDb::new(); - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let set = random_validator_set(&mut OsRng); let mut txn = db.txn(); assert!(Cosigning::::intended_cosigns(&mut txn, set).is_empty()); txn.commit(); } - #[test] - fn intended_cosigns_receives_sent_intent() { - serai_env::init_logger(); + // Receives sent intent + { let mut db = MemDb::new(); - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let set = random_validator_set(&mut OsRng); let intent = CosignIntent { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), notable: true, }; diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 39012c2ae..11e911686 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -5,7 +5,7 @@ use rand_core::OsRng; use serai_task::ContinuallyRan; use crate::{ - LatestAcknowledgedBlock, + LatestCosignedBlockNumber, delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, evaluator::CosignedBlocks, tests::{IntoTask, TaskTest}, @@ -41,13 +41,13 @@ impl DelayTest { (Self::default(), start) } - async fn assert_task_iteration_completes_with(&self, latest_acknowledged_block: u64) { + async fn assert_task_iteration_completes_with(&self, latest_finalized_block: u64) { use serai_env::log::debug; - let actual = LatestAcknowledgedBlock::get(&self.db); + let actual = LatestCosignedBlockNumber::get(&self.db); let cosigned_pending = CosignedBlocks::peek(&self.db).is_some(); - debug!("LatestAcknowledgedBlock: {actual:?} (expected: Some({latest_acknowledged_block}))"); + debug!("LatestFinalizedBlock: {actual:?} (expected: Some({latest_finalized_block}))"); debug!("CosignedBlocks pending: {cosigned_pending}"); - assert_eq!(actual, Some(latest_acknowledged_block)); + assert_eq!(actual, Some(latest_finalized_block)); assert!(!cosigned_pending, "CosignedBlocks queue items should have been consumed"); } } @@ -60,12 +60,12 @@ async fn returns_false_with_no_messages() { TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - assert_eq!(LatestAcknowledgedBlock::get(&test.db), None); + assert_eq!(LatestCosignedBlockNumber::get(&test.db), None); assert_eq!(CosignedBlocks::peek(&test.db), None); } #[tokio::test] -async fn updates_latest_acknowledged_block_after_ack_delay() { +async fn updates_latest_finalized_block_after_ack_delay() { let (mut test, start) = DelayTest::new(); { @@ -73,9 +73,9 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { // blocks with the same timestamps // nothing unusual happens, the task follow block numbers let now = now_secs(); - CosignedBlocks::send(&mut txn, &(0, now)); - CosignedBlocks::send(&mut txn, &(1, now)); - CosignedBlocks::send(&mut txn, &(2, now)); + CosignedBlocks::send(&mut txn, &(0, now, true)); + CosignedBlocks::send(&mut txn, &(1, now, true)); + CosignedBlocks::send(&mut txn, &(2, now, true)); txn.commit(); } @@ -91,9 +91,9 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { // timestamps out of order // nothing unusual happens, the task stil follows block numbers let now = now_secs(); - CosignedBlocks::send(&mut txn, &(3, now)); - CosignedBlocks::send(&mut txn, &(4, now - 1)); - CosignedBlocks::send(&mut txn, &(5, now - 2)); + CosignedBlocks::send(&mut txn, &(3, now, true)); + CosignedBlocks::send(&mut txn, &(4, now - 1, true)); + CosignedBlocks::send(&mut txn, &(5, now - 2, true)); txn.commit(); } @@ -107,9 +107,9 @@ async fn updates_latest_acknowledged_block_after_ack_delay() { // timestamps increasing in order // nothing unusual happens, the task stil follows block numbers let now = now_secs(); - CosignedBlocks::send(&mut txn, &(6, now)); - CosignedBlocks::send(&mut txn, &(7, now + 1)); - CosignedBlocks::send(&mut txn, &(8, now + 2)); + CosignedBlocks::send(&mut txn, &(6, now, true)); + CosignedBlocks::send(&mut txn, &(7, now + 1, true)); + CosignedBlocks::send(&mut txn, &(8, now + 2, true)); txn.commit(); } @@ -126,13 +126,13 @@ async fn does_not_regress_and_skips_if_not_a_later_block() { { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1, now_secs())); - CosignedBlocks::send(&mut txn, &(2, now_secs())); + CosignedBlocks::send(&mut txn, &(1, now_secs(), true)); + CosignedBlocks::send(&mut txn, &(2, now_secs(), true)); // Sent out of order below - CosignedBlocks::send(&mut txn, &(4, now_secs())); + CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); // 3 will be skipped after 4 was processed - CosignedBlocks::send(&mut txn, &(3, now_secs())); + CosignedBlocks::send(&mut txn, &(3, now_secs(), true)); txn.commit(); } @@ -148,7 +148,7 @@ async fn does_not_regress_and_skips_if_not_a_later_block() { { let mut txn = test.db.txn(); // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4, now_secs())); + CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); txn.commit(); } @@ -169,7 +169,7 @@ async fn respects_acknowledgement_delay() { let now = now_secs(); { let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(block_number, now)); + CosignedBlocks::send(&mut txn, &(block_number, now, true)); txn.commit(); } @@ -181,14 +181,14 @@ async fn respects_acknowledgement_delay() { // Well before ACKNOWLEDGEMENT_DELAY, the block must not be acknowledged tokio::time::sleep(Duration::from_secs(ACKNOWLEDGEMENT_DELAY.as_secs().saturating_sub(2))).await; - assert!(LatestAcknowledgedBlock::get(&test.db).is_none()); + assert!(LatestCosignedBlockNumber::get(&test.db).is_none()); // Wait for the task to complete let made_progress = task_handle.await.unwrap().unwrap(); assert!(made_progress); - // Block is now acknowledged - assert_eq!(LatestAcknowledgedBlock::get(&test.db), Some(block_number)); + // Block is now finalized + assert_eq!(LatestCosignedBlockNumber::get(&test.db), Some(block_number)); // The elapsed time must be at least ACKNOWLEDGEMENT_DELAY let elapsed = start.elapsed(); diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index d2b5b9d8d..df9554db3 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -19,8 +19,7 @@ use serai_task::ContinuallyRan; use crate::{ Cosign, GlobalSession, HasEvents, NetworksLatestCosignedBlock, evaluator::{ - CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, LatestEvaluatedBlock, - REQUEST_COSIGNS_SPACING, + CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, }, intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, tests::{IntoTask, TaskTest, TestRequest, random_global_session}, @@ -47,7 +46,7 @@ impl IntoTask for EvaluatorTest { impl EvaluatorTest { fn init_global_session(&mut self, start_block_number: u64) -> [u8; 32] { - let global_session = random_global_session(); + let global_session = random_global_session(&mut OsRng); let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; let mut keys = HashMap::new(); @@ -74,13 +73,11 @@ impl EvaluatorTest { fn verify_db_invariants(db: &mut MemDb, expected_cosigned_range: Option<(u64, u64)>) { use serai_env::log::debug; - let latest_evaluated = LatestEvaluatedBlock::get(db); let current_session = CurrentlyEvaluatedGlobalSession::get(db); let block_events_pending = BlockEvents::peek(db).is_some(); let sessions_pending = GlobalSessionsChannel::peek(db).is_some(); let cosigned_pending = CosignedBlocks::peek(db).is_some(); - debug!("LatestEvaluatedBlock: {latest_evaluated:?}"); debug!( "CurrentlyEvaluatedGlobalSession: {:?}", current_session.as_ref().map(|(id, gs)| (hex::encode(id), gs.start_block_number)) @@ -103,7 +100,7 @@ fn verify_db_invariants(db: &mut MemDb, expected_cosigned_range: Option<(u64, u6 assert!(has_session, "CurrentlyEvaluatedGlobalSession should exist after processing blocks"); for expected_block in start ..= end { - let (block_number, _time) = CosignedBlocks::try_recv(&mut txn) + let (block_number, _time, _has_events) = CosignedBlocks::try_recv(&mut txn) .unwrap_or_else(|| panic!("expected cosigned block {expected_block}")); debug!("CosignedBlock: block_number={block_number}"); assert_eq!(block_number, expected_block, "cosigned block mismatch"); @@ -160,11 +157,18 @@ async fn processes_blocks_with_no_events() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); - assert!( - CosignedBlocks::peek(&test.db).is_none(), - "HasEvent::No blocks shouldn't produce CosignedBlocks" - ); - assert_eq!(LatestEvaluatedBlock::get(&test.db), Some(2)); + + // HasEvents::No blocks are sent through CosignedBlocks with has_events=false + { + let mut txn = test.db.txn(); + for expected in 0 ..= 2 { + let (block_number, _time, has_events) = CosignedBlocks::try_recv(&mut txn).unwrap(); + assert_eq!(block_number, expected); + assert_eq!(has_events, false); + } + assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "no more blocks expected"); + txn.commit(); + } } #[tokio::test] @@ -275,7 +279,7 @@ async fn non_notable_computes_lowest_common_block() { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: 100u64 }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); @@ -363,11 +367,18 @@ async fn advances_global_session_at_start_block() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); - assert!( - CosignedBlocks::peek(&test.db).is_none(), - "HasEvent::No blocks shouldn't produce CosignedBlocks" - ); - assert_eq!(LatestEvaluatedBlock::get(&test.db), Some(3)); + // HasEvents::No blocks are sent through CosignedBlocks with has_events=false + { + let mut txn = test.db.txn(); + for expected in 1 ..= 3 { + let (block_number, _time, has_events) = CosignedBlocks::try_recv(&mut txn) + .unwrap_or_else(|| panic!("expected cosigned block {expected}")); + assert_eq!(block_number, expected); + assert!(!has_events, "HasEvents::No blocks should have has_events=false"); + } + assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "no more blocks expected"); + txn.commit(); + } let current = CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); @@ -437,7 +448,7 @@ mod errors { GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); @@ -519,7 +530,7 @@ mod errors { GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); @@ -641,7 +652,7 @@ mod errors { GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, info)); BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); txn.commit(); @@ -720,7 +731,7 @@ mod errors { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); @@ -775,7 +786,7 @@ mod errors { let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; let mut txn = test.db.txn(); - let id = random_global_session(); + let id = random_global_session(&mut OsRng); GlobalSessionsChannel::send(&mut txn, &(id, info)); txn.commit(); diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 7d01f8756..8cff8a7e4 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -18,15 +18,16 @@ use std::sync::{ atomic::{AtomicUsize, Ordering}, }; -pub(crate) fn random_global_session() -> [u8; 32] { - use rand::RngCore; - let mut id = [0u8; 32]; - rand_core::OsRng.fill_bytes(&mut id); - id -} +use rand::{CryptoRng, Rng, RngCore}; use serai_shim_rpc::{SeraiShimRpc, ShimState}; -use serai_client_serai::Serai; +use serai_client_serai::{ + Serai, + abi::primitives::{ + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, + }, +}; pub(crate) use serai_test_task::{IntoTask, TaskTest}; use crate::RequestNotableCosigns; @@ -73,3 +74,24 @@ async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { let serai = Arc::new(Serai::new(shim_serai.url()).unwrap()); (shim_serai, serai) } + +pub(crate) fn random_global_session(rng: &mut R) -> [u8; 32] { + let mut id = [0u8; 32]; + rng.fill_bytes(&mut id); + id +} + +/// For whe external validator set does not alter or affect the behavior of the functions being tested +/// this can be used just as a default value any time +pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } +} +pub(crate) fn random_validator_set(rng: &mut R) -> ExternalValidatorSet { + let network = match rng.gen_range(0u8 ..= 2) { + 0 => ExternalNetworkId::Bitcoin, + 1 => ExternalNetworkId::Ethereum, + 2 => ExternalNetworkId::Monero, + _ => unreachable!(), + }; + ExternalValidatorSet { network, session: Session(rng.gen()) } +} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3c62e743f..9fc547f34 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -91,7 +91,7 @@ fn spawn_cosigning( let last_cosign_rebroadcast = Instant::now(); loop { // Intake our own cosigns - match Cosigning::::latest_acknowledged_block(&db) { + match Cosigning::::latest_finalized_block(&db) { Ok(latest_acknowledged_block) => { let mut txn = db.txn(); // The cosigns we prior tried to intake yet failed to diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs index 040e6ff85..25a54344f 100644 --- a/coordinator/src/tributary.rs +++ b/coordinator/src/tributary.rs @@ -103,7 +103,7 @@ impl ContinuallyRan pending_notable_cosign = cosign.notable; // If we (Serai) haven't cosigned this block, break as this is still pending - let latest = match Cosigning::::latest_acknowledged_block(&txn) { + let latest = match Cosigning::::latest_finalized_block(&txn) { Ok(latest) => latest, Err(Faulted) => { log::error!("cosigning faulted"); diff --git a/coordinator/substrate/src/canonical.rs b/coordinator/substrate/src/canonical.rs index 5a24873b0..1d70b7048 100644 --- a/coordinator/substrate/src/canonical.rs +++ b/coordinator/substrate/src/canonical.rs @@ -44,7 +44,7 @@ impl ContinuallyRan for CanonicalEventStream { async move { let next_block = NextBlock::get(&self.db).unwrap_or(0); let latest_finalized_block = - Cosigning::::latest_acknowledged_block(&self.db).map_err(|e| format!("{e:?}"))?; + Cosigning::::latest_finalized_block(&self.db).map_err(|e| format!("{e:?}"))?; // These are all the events which generate canonical messages struct CanonicalEvents { diff --git a/coordinator/substrate/src/ephemeral.rs b/coordinator/substrate/src/ephemeral.rs index b94aca661..32a969f23 100644 --- a/coordinator/substrate/src/ephemeral.rs +++ b/coordinator/substrate/src/ephemeral.rs @@ -54,7 +54,7 @@ impl ContinuallyRan for EphemeralEventStream { async move { let next_block = NextBlock::get(&self.db).unwrap_or(0); let latest_finalized_block = - Cosigning::::latest_acknowledged_block(&self.db).map_err(|e| format!("{e:?}"))?; + Cosigning::::latest_finalized_block(&self.db).map_err(|e| format!("{e:?}"))?; // These are all the events which generate canonical messages struct EphemeralEvents { diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index c4029a58b..0a06164a7 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -61,7 +61,10 @@ impl SeraiShimRpc { /// Returns the hash of the newly created block. pub async fn add_block_with_events(&self, events: Vec>) -> BlockHash { let mut state = self.state.write().await; - let number = state.latest_finalized_block_number() + 1; + let Some(latest_block) = state.blocks_by_number.keys().copied().max() else { + return state.make_block(0, events); + }; + let number = latest_block + 1; state.make_block(number, events) } diff --git a/tests/shim-rpc/src/state.rs b/tests/shim-rpc/src/state.rs index 51d19f622..14cb95e51 100644 --- a/tests/shim-rpc/src/state.rs +++ b/tests/shim-rpc/src/state.rs @@ -147,22 +147,16 @@ impl ShimState { /// /// Unlike [`Self::make_block`], this does **not** advance the internal /// `builds_upon` state, so subsequent calls to `make_block` remain valid. - pub fn make_non_linear_block( - &mut self, - number: u64, - events: Vec>, - ) -> BlockHash { + pub fn make_non_linear_block(&mut self, number: u64, events: Vec>) -> BlockHash { let block = Block { header: Header::V1(HeaderV1 { number, - // Use an empty tree — this will NOT match what the task expects + // Use an empty tree: this will NOT match what the task expects builds_upon: IncrementalUnbalancedMerkleTree::new().calculate(BLOCK_BRANCH_TAG), proposer: SeraiAddress([0; 32]), #[expect(clippy::cast_possible_truncation, clippy::as_conversions)] - unix_time_in_millis: SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_millis() as u64, + unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() + as u64, transactions_commitment: UnbalancedMerkleTree::EMPTY, events_commitment: UnbalancedMerkleTree::EMPTY, consensus_commitment: [0; 32], From d7572c337f3fa864dd0c308216628d8c96d41a9f Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 20 Mar 2026 16:17:52 -0300 Subject: [PATCH 37/71] feat(coordinator/cosign): full_stack.rs tests --- coordinator/cosign/src/intend.rs | 26 +- coordinator/cosign/src/tests/full_stack.rs | 355 +++++++++++++++++---- coordinator/cosign/src/tests/intend.rs | 6 +- substrate/primitives/src/test_helpers.rs | 9 +- tests/shim-rpc/src/lib.rs | 4 +- tests/shim-rpc/src/rpc.rs | 4 +- 6 files changed, 330 insertions(+), 74 deletions(-) diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 7fe2bc248..e9a1f5c12 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -89,16 +89,16 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - serai_env::debug!( - "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" - ); - if latest_serai_block_number < start_scan_block_number { // made_progress = False // Skip block already indexed return Ok(false); } + serai_env::debug!( + "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" + ); + let mut made_progress = false; for block_number in start_scan_block_number ..= latest_serai_block_number { @@ -294,8 +294,19 @@ impl ContinuallyRan for CosignIntendTask { }; serai_env::debug!( - "Notable block block_number={block_number}: new session created {next_global_session_info:?}" - ); + "Notable block block_number={block_number}: new session created \ + start_block_number={start_block}, sets={sets:?}, keys={{ {keys_hex} }}, \ + stakes={stakes:?}, total_stake={total_stake}", + start_block = next_global_session_info.start_block_number, + sets = next_global_session_info.sets, + keys_hex = next_global_session_info + .keys + .iter() + .map(|(net, key)| format!("{net:?}: 0x{}", hex::encode(key.0))) + .collect::>() + .join(", "), + stakes = next_global_session_info.stakes, + ); GlobalSessions::set(&mut txn, new_global_session, &next_global_session_info); if let Some(ending_global_session) = global_session_for_this_block { @@ -309,6 +320,9 @@ impl ContinuallyRan for CosignIntendTask { // we flag it as not having any events requiring cosigning so we don't attempt to // sign/require a cosign for it if global_session_for_this_block.is_none() { + serai_env::debug!( + "no previous global session available to cosign, has_events = HasEvents::No" + ); has_events = HasEvents::No; } diff --git a/coordinator/cosign/src/tests/full_stack.rs b/coordinator/cosign/src/tests/full_stack.rs index 1dd06c9fc..d346a3be4 100644 --- a/coordinator/cosign/src/tests/full_stack.rs +++ b/coordinator/cosign/src/tests/full_stack.rs @@ -1,74 +1,54 @@ -use std::{collections::HashSet, time::Duration}; +//! Full-stack integration tests for the cosign library's public API. +//! +//! While the individual components (intend, evaluate, delay) are unit-tested in their +//! respective modules, these tests verify how they integrate together as a production +//! pipeline. State is injected via the Serai node shim (`serai_shim_rpc`), exercising +//! the same `pub` API surface a real coordinator would use. -use serai_db::{Db as _, DbTxn, MemDb}; +use std::time::Duration; -use serai_cosign_types::tests::sign_cosign; +use rand::{Rng, seq::SliceRandom}; +use rand_core::OsRng; +use serai_db::{Db as _, DbTxn, MemDb}; +use serai_primitives::test_helpers::random_block_hash; use serai_client_serai::abi::primitives::{ network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, }; +use serai_cosign_types::tests::sign_cosign; use crate::{ - CosignIntent, Cosigning, GlobalSessions, IntakeCosignError, + Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, GlobalSessions, IntakeCosignError, + SubstrateBlockHash, + delay::LatestCosignedBlockNumber, + evaluator::{cosign_threshold, currently_evaluated_global_session}, tests::{TestRequest, setup_shim_serai}, }; -#[tokio::test] -async fn full_stack_fuzzed() { - use super::intend::EventFuzzer; - - let _ = env_logger::try_init(); - - let num_blocks = 20; - let mut fuzzer = EventFuzzer::new(); - let blocks = fuzzer.generate_blocks(num_blocks); - - serai_log::log::info!( - "Full-stack fuzz: {} blocks, {} validators, seed={}", - num_blocks, - fuzzer.validators.len(), - hex::encode(fuzzer.seed), - ); - - let (shim, serai) = setup_shim_serai().await; - for (i, events) in blocks.into_iter().enumerate() { - shim.make_block(u64::try_from(i).unwrap(), events).await; - } - - let mut db = MemDb::new(); +use super::intend::EventFuzzer; - let (request, _calls) = TestRequest::new(false); - let mut cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); - - let target = u64::try_from(num_blocks - 1).unwrap(); - - // Buffer for intents whose cosigns were rejected as FutureGlobalSession. - // These are retried each iteration until the delay task catches up. +/// Run the honest cosigning loop: drain intents from all keyed sessions, sign them +/// with the EventFuzzer's keypairs, intake them, and repeat until `should_break` returns `true`. +/// +/// `should_break` is called each iteration with the current `latest_cosigned_block_number`. +async fn run_honest_cosigning( + db: &MemDb, + cosigning: &mut Cosigning, + event_fuzzer: &EventFuzzer, + mut should_break: impl FnMut(Option) -> bool, +) { let mut pending_intents: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); - let mut seen_global_sessions: HashSet<[u8; 32]> = HashSet::new(); - - let deadline = tokio::time::Instant::now() + Duration::from_secs(300); - loop { - assert!( - tokio::time::Instant::now() < deadline, - "timed out waiting for all blocks to be cosigned (target={target}, \ - latest={:?}, pending_intents={})", - Cosigning::::latest_acknowledged_block(&db), - pending_intents.len(), - ); - - // Drain new intended cosigns for all validator sets that have had SetKeys { + let mut db = db.clone(); let mut txn = db.txn(); for network in ExternalNetworkId::all() { - let max_session = fuzzer.next_session.get(&network).copied().unwrap_or(0); + let max_session = event_fuzzer.next_session.get(&network).copied().unwrap_or(0); for session_num in 0 .. max_session { let set = ExternalValidatorSet { network, session: Session(session_num) }; let intents = Cosigning::::intended_cosigns(&mut txn, set); for intent in intents { - seen_global_sessions.insert(intent.global_session); pending_intents.push((network, intent)); } } @@ -76,16 +56,15 @@ async fn full_stack_fuzzed() { txn.commit(); } - // Try to intake all pending intents, keeping those that fail with temporal errors let mut still_pending = Vec::new(); for (network, intent) in pending_intents.drain(..) { let cosign = intent.into_cosign(network); - let Some(gs) = GlobalSessions::get(&db, intent.global_session) else { + let Some(gs) = GlobalSessions::get(db, intent.global_session) else { still_pending.push((network, intent)); continue; }; let Some(public) = gs.keys.get(&network) else { continue }; - let Some(keypair) = fuzzer.keypairs.get(&public.0) else { continue }; + let Some(keypair) = event_fuzzer.keypairs.get(&public.0) else { continue }; let signed = sign_cosign(cosign, keypair); match cosigning.intake_cosign(&signed) { Ok(()) => {} @@ -94,10 +73,9 @@ async fn full_stack_fuzzed() { Err(IntakeCosignError::NotYetIndexedBlock) => { still_pending.push((network, intent)); } - // StaleCosign means a newer cosign already exists; safe to drop Err(IntakeCosignError::StaleCosign) => {} Err(ref e) => { - serai_log::log::warn!( + serai_env::log::warn!( "intake_cosign dropped: block={}, network={:?}, err={:?}", intent.block_number, network, @@ -108,18 +86,273 @@ async fn full_stack_fuzzed() { } pending_intents = still_pending; - match Cosigning::::latest_acknowledged_block(&db) { - Ok(n) if n >= target => break, - _ => {} + let latest = match Cosigning::::latest_cosigned_block_number(db) { + Ok(Some(n)) => Some(n), + _ => None, + }; + if should_break(latest) { + break; } tokio::time::sleep(Duration::from_millis(50)).await; } +} + +/// Full-stack fuzz test: intend -> evaluator -> delay pipeline with random events. +/// +/// Uses the `EventFuzzer` to generate random blocks, spawns the full `Cosigning` pipeline, +/// then simulates the cosigner role by draining intended cosigns, signing them, and feeding +/// them back via `intake_cosign`. Waits for all blocks to be cosigned. +/// +/// The shim RPC has a random failure rate enabled so that RPC calls from the intend task +/// occasionally fail, exercising the `ContinuallyRan` error/retry paths. +#[tokio::test] +async fn full_stack_fuzzed() { + serai_env::init_logger(); - let latest = Cosigning::::latest_acknowledged_block(&db).unwrap(); - assert!(latest >= target, "expected latest cosigned block >= {target}, got {latest}"); + let iterations = 5; + for i in 1 .. iterations + 1 { + let num_blocks = OsRng.gen_range(5 .. 20); + let mut event_fuzzer = EventFuzzer::new(); + let blocks = event_fuzzer.generate_blocks(num_blocks); - serai_log::log::info!("Full-stack fuzz completed: all {num_blocks} blocks cosigned"); + serai_env::log::info!( + "Starting full-stack fuzz: 0..{} blocks, {} validators ({i}/{iterations})", + num_blocks - 1, + event_fuzzer.validators.len(), + ); - let session_ids: Vec<[u8; 32]> = seen_global_sessions.into_iter().collect(); + let (shim, serai) = setup_shim_serai().await; + for (i, events) in blocks.into_iter().enumerate() { + shim.make_block(u64::try_from(i).unwrap(), events).await; + } + + // Random RPC failure rate between 5% and 30%, unless disabled via env var + shim.set_failure_rate(OsRng.gen_range(5 ..= 30)).await; + + let db = MemDb::new(); + + let (request, _calls) = TestRequest::new(false); + let mut cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + let target = u64::try_from(num_blocks - 1).unwrap(); + + run_honest_cosigning( + &db, + &mut cosigning, + &event_fuzzer, + |latest| matches!(latest, Some(n) if n >= target), + ) + .await; + + let latest = Cosigning::::latest_cosigned_block_number(&db).unwrap().unwrap(); + assert!(latest >= target, "expected latest cosigned block >= {target}, got {latest}"); + + serai_env::log::info!("Full-stack fuzz completed: all {num_blocks} blocks cosigned"); + } +} + +/// Fuzzed full-stack equivocation test. +/// +/// Mirrors `full_stack_fuzzed`, random events via `EventFuzzer`, full `Cosigning` pipeline +/// but at a random point during honest cosigning, one or more networks equivocate by signing +/// a block with a different hash. Once the faulty stake reaches the 17% threshold the protocol +/// must halt immediately, and all subsequent operations must reflect the fault. +#[tokio::test] +async fn equivocation_halts_protocol() { + serai_env::init_logger(); + + let iterations = 5; + for iteration in 1 ..= iterations { + let num_blocks = OsRng.gen_range(5 .. 20); + let mut event_fuzzer = EventFuzzer::new(); + let blocks = event_fuzzer.generate_blocks(num_blocks); + + serai_env::log::info!( + "equivocation fuzz: 0..{} blocks, {} validators ({iteration}/{iterations})", + num_blocks - 1, + event_fuzzer.validators.len(), + ); + + let (shim, serai) = setup_shim_serai().await; + for (i, events) in blocks.into_iter().enumerate() { + shim.make_block(u64::try_from(i).unwrap(), events).await; + } + + let mut db = MemDb::new(); + let (request, _calls) = TestRequest::new(false); + let mut cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + let target = u64::try_from(num_blocks - 1).unwrap(); + + // Pick a random target for when to attempt equivocation: after cosigning block N. + // We pick from the lower half so there's room for honest progress first. + let equivocation_after_block: u64 = OsRng.gen_range(2 ..= target / 2); + + let mut reached_equivocation_point = false; + let deadline = tokio::time::Instant::now() + Duration::from_secs(300); + + // Step 1: run the honest pipeline until we've cosigned enough blocks to equivocate + // We need at least one global session to exist and at least one block cosigned under it. + run_honest_cosigning(&db, &mut cosigning, &event_fuzzer, |latest| { + assert!( + tokio::time::Instant::now() < deadline, + "timed out waiting to reach equivocation point (target cosigned block \ + {equivocation_after_block}, latest={latest:?})", + ); + match latest { + Some(n) if n >= equivocation_after_block => { + reached_equivocation_point = true; + true + } + Some(n) if n >= target => true, + _ => false, + } + }) + .await; + + if !reached_equivocation_point { + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): no global session formed, skipping" + ); + continue; + } + + assert!(FaultedSession::get(&db).is_none(), "should not be faulted before equivocation"); + + // Step 2: inject equivocation + + let equivocation_block = equivocation_after_block; + let indexed_hash = SubstrateBlockHash::get(&db, equivocation_block) + .expect("equivocation block should be indexed"); + + // Find the global session that covers this block via the evaluator's current session + let Some(global_session_id) = currently_evaluated_global_session(&db) else { + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): no evaluated global session, skipping" + ); + continue; + }; + let global_session = + GlobalSessions::get(&db, global_session_id).expect("evaluated session should exist in DB"); + if equivocation_block < global_session.start_block_number { + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): equivocation block \ + {equivocation_block} predates session start {}, skipping", + global_session.start_block_number, + ); + continue; + } + + // Pick which networks equivocate: 1 to all networks in this session + let session_networks: Vec = global_session.keys.keys().copied().collect(); + let num_faulty = OsRng.gen_range(1 ..= session_networks.len()); + let faulty_networks: Vec = + session_networks.choose_multiple(&mut OsRng, num_faulty).copied().collect(); + + let fault_threshold = (global_session.total_stake * 17) / 100; + let faulty_stake: u64 = + faulty_networks.iter().map(|n| global_session.stakes.get(n).copied().unwrap_or(0)).sum(); + + // Generate a hash that differs from the indexed one + let mut faulty_block_hash = random_block_hash(&mut OsRng); + if faulty_block_hash == indexed_hash { + faulty_block_hash = random_block_hash(&mut OsRng); + } + + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): block={equivocation_block}, \ + faulty={faulty_networks:?}, faulty_stake={faulty_stake}, threshold={fault_threshold}, \ + will_fault={}", + faulty_stake >= fault_threshold, + ); + + // Submit equivocating cosigns one at a time, tracking cumulative fault weight + let mut cumulative_faulty_stake: u64 = 0; + for (fi, &faulty_net) in faulty_networks.iter().enumerate() { + let faulty_cosign = Cosign { + global_session: global_session_id, + block_number: equivocation_block, + block_hash: faulty_block_hash, + cosigner: faulty_net, + }; + let public = + global_session.keys.get(&faulty_net).expect("faulty network not in global session"); + let keypair = + event_fuzzer.keypairs.get(&public.0).expect("missing keypair for faulty network"); + let faulty_signed = sign_cosign(faulty_cosign, keypair); + cosigning.intake_cosign(&faulty_signed).unwrap(); + + let net_stake = global_session.stakes.get(&faulty_net).copied().unwrap_or(0); + cumulative_faulty_stake += net_stake; + let faulted_now = cumulative_faulty_stake >= fault_threshold; + + serai_env::log::info!( + "faulty cosign {}/{num_faulty} from {faulty_net:?} (stake={net_stake}): \ + cumulative={cumulative_faulty_stake}, threshold={fault_threshold}, faulted={faulted_now}", + fi + 1, + ); + + if faulted_now { + assert_eq!( + FaultedSession::get(&db), + Some(global_session_id), + "session should be faulted after {faulty_net:?}: cumulative stake \ + {cumulative_faulty_stake} >= threshold {fault_threshold}" + ); + } else { + assert!( + FaultedSession::get(&db).is_none(), + "session should NOT be faulted after {faulty_net:?}: cumulative stake \ + {cumulative_faulty_stake} < threshold {fault_threshold}" + ); + } + } + + if faulty_stake < fault_threshold { + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): faulty stake {faulty_stake} below \ + threshold {fault_threshold}, verifying protocol continues" + ); + assert!(FaultedSession::get(&db).is_none()); + continue; + } + + // Step 3: verify the protocol is halted + + assert!( + matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted)), + "latest_cosigned_block_number should return Faulted" + ); + + // Verify cosigns_to_rebroadcast includes the faulty cosign(s) + let rebroadcast = cosigning.cosigns_to_rebroadcast(); + assert!( + rebroadcast.iter().any(|c| c.cosign.block_hash == faulty_block_hash), + "rebroadcast should include the faulty cosign" + ); + + // Verify that the protocol remains permanently faulted: drain any remaining intents + // and confirm latest_cosigned_block_number is still Err(Faulted). + { + let mut txn = db.txn(); + for network in ExternalNetworkId::all() { + let max_session = event_fuzzer.next_session.get(&network).copied().unwrap_or(0); + for session_num in 0 .. max_session { + let set = ExternalValidatorSet { network, session: Session(session_num) }; + let _ = Cosigning::::intended_cosigns(&mut txn, set); + } + } + txn.commit(); + } + + assert!( + matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted)), + "latest_cosigned_block_number should remain Faulted after further operations" + ); + + serai_env::log::info!( + "equivocation fuzz ({iteration}/{iterations}): protocol halted as expected" + ); + } } diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index d1452c2a6..33a48657d 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -14,7 +14,7 @@ use serai_client_serai::{ address::SeraiAddress, balance::{Amount, ExternalBalance}, coin::ExternalCoin, - crypto::{ExternalKey, KeyPair}, + crypto::KeyPair, instructions::{OutInstruction, OutInstructionWithBalance}, network_id::{ExternalNetworkId, NetworkId}, validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, @@ -22,7 +22,7 @@ use serai_client_serai::{ validator_sets, }, }; -use serai_primitives::test_helpers::{random_external_address, random_keypair}; +use serai_primitives::test_helpers::{random_external_address, random_external_key, random_keypair}; use crate::{intend::*, tests::*, *}; @@ -519,7 +519,7 @@ impl EventFuzzer { let (keypair, public) = random_keypair(&mut OsRng); self.keypairs.insert(public.0, keypair); - let external_key = ExternalKey(vec![1u8].try_into().unwrap()); + let external_key = random_external_key(&mut OsRng); let key_pair = KeyPair(public, external_key); Some(Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair })) diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index f80e7a5bc..617c0cb85 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -2,7 +2,7 @@ use rand_core::{RngCore, CryptoRng}; -use crate::{BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::Public}; +use crate::{BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::{Public, ExternalKey}}; /// Generate a random [`ExternalAddress`]. pub fn random_external_address(rng: &mut R) -> ExternalAddress { @@ -32,6 +32,13 @@ pub fn random_keypair(rng: &mut R) -> (schnorrkel::Keypa (keypair, public) } +/// Generate a random [`ExternalKey`]. +pub fn random_external_key(rng: &mut R) -> ExternalKey { + let mut key = [0; 32]; + rng.fill_bytes(&mut key); + ExternalKey(key.to_vec().try_into().unwrap()) +} + /// Generate a random [`BlockHash`]. pub fn random_block_hash(rng: &mut R) -> BlockHash { let mut hash = [0; 32]; diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index 0a06164a7..173b3ba3f 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -145,8 +145,10 @@ impl SeraiShimRpc { /// Set the probability (0–100) that any RPC request randomly fails. /// /// 0 disables fuzzing (the default), 100 fails every request. + /// If the `RUST_TEST_NO_RPC_FUZZ` env var is set, the rate is forced to 0. pub async fn set_failure_rate(&self, percent: u8) { - self.state.write().await.errors.failure_rate = percent; + let effective = if std::env::var("RUST_TEST_NO_RPC_FUZZ").is_ok() { 0 } else { percent }; + self.state.write().await.errors.failure_rate = effective; } /// Disable random request failures. diff --git a/tests/shim-rpc/src/rpc.rs b/tests/shim-rpc/src/rpc.rs index 8ad83f2ed..b261a0cab 100644 --- a/tests/shim-rpc/src/rpc.rs +++ b/tests/shim-rpc/src/rpc.rs @@ -42,8 +42,8 @@ impl From for ErrorObjectOwned { /// Resolve a block hash from JSON-RPC params. /// /// Mirrors `substrate/node/src/rpc/utils.rs`: -/// - `{ "block": "hex_hash" }` → lookup by hash -/// - `{ "block": 123 }` → lookup by number +/// - `{ "block": "hex_hash" }` = lookup by hash +/// - `{ "block": 123 }` = lookup by number fn resolve_block_hash( params: &jsonrpsee::types::params::Params, state: &crate::state::ShimState, From 11bf62c0887cef7e7cee559e65ff23e437f4cca8 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 20 Mar 2026 16:28:44 -0300 Subject: [PATCH 38/71] feat(coordinator/cosign): initial review changes --- coordinator/cosign/Cargo.toml | 1 + coordinator/cosign/src/delay.rs | 32 +++-- coordinator/cosign/src/evaluator.rs | 35 +++-- coordinator/cosign/src/intend.rs | 49 +++---- coordinator/cosign/src/lib.rs | 36 +++-- coordinator/cosign/src/tests/cosigning.rs | 144 +++++++++++++------- coordinator/cosign/src/tests/intend.rs | 59 ++++++-- coordinator/cosign/src/tests/mod.rs | 6 +- coordinator/cosign/types/Cargo.toml | 4 +- coordinator/cosign/types/src/tests/mod.rs | 93 ++++++++----- coordinator/src/main.rs | 9 +- coordinator/src/tributary.rs | 4 +- coordinator/substrate/src/canonical.rs | 9 +- coordinator/substrate/src/ephemeral.rs | 7 +- substrate/abi/src/modules/validator_sets.rs | 2 - tests/shim-rpc/Cargo.toml | 2 +- tests/shim-rpc/LICENSE | 28 ++-- tests/shim-rpc/README.md | 2 - tests/shim-rpc/src/lib.rs | 5 - tests/shim-rpc/tests/integration.rs | 9 -- tests/task/Cargo.toml | 2 +- tests/task/LICENSE | 28 ++-- 22 files changed, 321 insertions(+), 245 deletions(-) diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index c7f48bbbb..ba6e03351 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -37,6 +37,7 @@ serai-cosign-types = { path = "./types" } serai-cosign-types = { path = "./types", features = ["test-helpers"] } serai-shim-rpc = { path = "../../tests/shim-rpc" } serai-test-task = { path = "../../tests/task" } +serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false } schnorrkel = { version = "0.11", default-features = false, features = ["std"] } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index 4073c74ec..d8aecb688 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -27,8 +27,9 @@ pub(crate) fn now_timestamp() -> Duration { create_db!( SubstrateCosignDelay { - // The latest block number finalized by the delay task. - // Finalized after a delay if it has events, simply marked as finalized if the block has no events. + // The latest block number marked as cosigned by the delay task. + // Cosigned after a delay if it had events and cosigns, + // simply marked as cosigned if the block had no events and no cosigns. LatestCosignedBlockNumber: () -> u64, } ); @@ -45,7 +46,7 @@ impl ContinuallyRan for CosignDelayTask { async move { let mut made_progress = false; loop { - let latest_finalized = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); + let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&self.db).unwrap_or(0); let mut txn = self.db.txn(); let Some((block_number, time_evaluated, has_events)) = CosignedBlocks::try_recv(&mut txn) @@ -53,22 +54,26 @@ impl ContinuallyRan for CosignDelayTask { break; }; - serai_env::debug!( - "beginning delay: block_number={block_number}, time_evaluated={time_evaluated}, \ - has_events={has_events}, latest_finalized={latest_finalized}", + serai_env::trace!( + "{block_number}: beginning delay: time_evaluated={time_evaluated}, \ + has_events={has_events}, latest_cosigned={latest_cosigned_block_number}", ); - if block_number <= latest_finalized { - // Already finalized a later block, consume and skip without sleeping. + // Defensive check, not likely to happen but does not allow regressing + if block_number <= latest_cosigned_block_number { + serai_env::warn!("Attempting to delay on an already cosigned block number ({block_number}, latest={latest_cosigned_block_number})"); + // consume and skip without sleeping. txn.commit(); continue; } - // No events means no cosigns to wait for, finalize immediately + // No events means no cosigns to wait for, mark as cosigned immediately if !has_events { LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); - serai_env::debug!("LatestFinalizedBlock={block_number} (no events, skipped delay)"); + serai_env::trace!( + "{block_number}: LatestCosignedBlockNumber={block_number} (no events, skipped delay)" + ); made_progress = true; continue; } @@ -80,9 +85,8 @@ impl ContinuallyRan for CosignDelayTask { // Drop txn during sleep drop(txn); - if time_valid_timestamp > now_timestamp { - let time_left = time_valid_timestamp - now_timestamp; - serai_env::debug!("beginning sleep: {time_left}s"); + if let Some(time_left) = time_valid_timestamp.checked_sub(now_timestamp) { + serai_env::debug!("{block_number}: sleeping for {time_left}s"); tokio::time::sleep(Duration::from_secs(time_left)).await; } @@ -92,7 +96,7 @@ impl ContinuallyRan for CosignDelayTask { LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); - serai_env::debug!("LatestFinalizedBlock={block_number}"); + serai_env::trace!("{block_number}: LatestCosignedBlockNumber={block_number}"); made_progress = true; } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 8f55be028..3926c198d 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -62,7 +62,7 @@ fn currently_evaluated_global_session_strict( }; assert!( existing.1.start_block_number <= block_number, - "candidate's start block number {:#?} exceeds our block number {block_number}", + "candidate's start block number ({:#?}) exceeds our block number ({block_number})", existing.1.start_block_number ); existing @@ -99,7 +99,7 @@ fn should_request_cosigns(last_request_for_cosigns: &mut Instant) -> bool { } //// Calculate the minimum threshold required for cosigning -fn cosign_threshold(total_stake: u64) -> u64 { +pub(crate) fn cosign_threshold(total_stake: u64) -> u64 { ((total_stake * COSIGN_COMMIT_THRESHOLD) / 100) + 1 } @@ -156,13 +156,20 @@ async fn ensure_cosigned( return Ok(()); } + // For HasEvents::Notable request the superseding notable cosigns over the network + // If this session hasn't yet produced notable cosigns, then we presume we'll see + // the desired non-notable cosigns as part of normal operations, without needing to + // explicitly request them + // + // For HasEvents::NonNotable request the necessary cosigns over the network if should_request_cosigns(last_request_for_cosigns) { request .request_notable_cosigns(global_session) .await - .map_err(|e| format!("RPC error fetching notable cosigns: {e:?}"))?; + .map_err(|e| format!("Error fetching notable cosigns: {e:?}"))?; } + // We return an error so the delay before this task is run again increases Err(format!("{label} block (#{block_number}) wasn't yet cosigned. this should resolve shortly")) } @@ -193,26 +200,23 @@ impl ContinuallyRan for CosignEvaluatorT break; }; - serai_env::log::debug!( - "beginning evaluator: block_number={block_number}, has_events={:#?}", - has_events - ); - // If no session is being evaluated yet, check if this block can be processed if currently_evaluated_global_session(&txn).is_none() { match GlobalSessionsChannel::peek(&txn) { // No global session declared yet: this block predates all sessions, skip it // this means only HasEvents:No blocks have been consumed so far None => { - serai_env::log::debug!("No global session declared yet"); + serai_env::trace!( + "{block_number}: No global session declared yet. Ending evaluator." + ); commit_evaluated_block(txn, block_number, false); made_progress = true; continue; } // Session queued but starts after this block, skip it Some(next) if next.1.start_block_number > block_number => { - serai_env::log::debug!( - "session {block_number} is queued for {}", + serai_env::trace!( + "{block_number}: Cannot cosign: GlobalSession is queued for block {}", next.1.start_block_number ); commit_evaluated_block(txn, block_number, false); @@ -224,6 +228,8 @@ impl ContinuallyRan for CosignEvaluatorT } } + serai_env::trace!("{block_number}: beginning evaluator: has_events={:#?}", has_events); + // Fetch the global session information let (global_session, global_session_info) = currently_evaluated_global_session_strict(&mut txn, block_number); @@ -236,7 +242,7 @@ impl ContinuallyRan for CosignEvaluatorT for set in global_session_info.sets { // Check if we have the cosign from this set - if NetworksLatestCosignedBlock::get(&mut txn, global_session, set.network) + if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) .map(|signed_cosign| signed_cosign.cosign.block_number) == Some(block_number) { @@ -312,11 +318,12 @@ impl ContinuallyRan for CosignEvaluatorT // Since we checked we had the necessary cosigns, send it for delay before acknowledgement commit_evaluated_block(txn, block_number, true); - // Roughly ~1 hour, no need for repetitive logging + // INFOs roughly every ~1 hour, no need for repetitive logging on prod, #[cfg(not(test))] if (block_number % 500) == 0 { - serai_env::debug!("marking block #{block_number} as cosigned"); + serai_env::prod_info!("marking block #{block_number} as cosigned"); } + // for tests debug on every block #[cfg(test)] serai_env::debug!("marking block #{block_number} as cosigned"); diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index e9a1f5c12..2b0c0a2bc 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -89,14 +89,8 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - if latest_serai_block_number < start_scan_block_number { - // made_progress = False - // Skip block already indexed - return Ok(false); - } - - serai_env::debug!( - "beginning scan: start={start_scan_block_number}, latest={latest_serai_block_number}" + serai_env::trace!( + "beginning intend scan: start={start_scan_block_number}, latest={latest_serai_block_number}" ); let mut made_progress = false; @@ -148,7 +142,7 @@ impl ContinuallyRan for CosignIntendTask { ); BuildsUpon::set(&mut txn, &builds_upon); - serai_env::debug!("iterating over block_number={block_number}"); + serai_env::trace!("iterating over block_number={block_number}"); let mut has_events = HasEvents::No; let vset_events = serai_block_events.validator_sets(); @@ -183,7 +177,7 @@ impl ContinuallyRan for CosignIntendTask { // this is a critical issue and will not be solved after re-tries, // missing Stakes from previous blocks will remain missing until re-indexed // if encountered halt the process - .expect("unable to deallocate with no prior existing stake"); + .expect("unable to deallocate with no prior indexed Stake"); Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0)); } @@ -197,9 +191,7 @@ impl ContinuallyRan for CosignIntendTask { let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; if validators.is_empty() { - // Maybe ephemeral: event blocks from RPC returned empty set list - // could resolve after retry. or will get forever stuck. - Err(format!("validator set from Event::SetDecided was empty"))?; + panic!("validator set from Event::SetDecided was empty"); } Validators::set( @@ -237,9 +229,10 @@ impl ContinuallyRan for CosignIntendTask { &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, ); } else { - serai_env::debug!( - "skipped session {:?} with 0 stake from being selected for cosigns", - set.session + serai_env::trace!( + "{block_number}: skipped session {:?} of {:?} with 0 stake from being selected for cosigns", + set.session, + set.network ); } } @@ -253,7 +246,7 @@ impl ContinuallyRan for CosignIntendTask { let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); - serai_env::debug!("type of has_events={has_events:?}"); + serai_env::trace!("{block_number}: type of has_events={has_events:?}"); // If this is notable, it creates a new global session, which we index into the database // now @@ -293,18 +286,12 @@ impl ContinuallyRan for CosignIntendTask { total_stake, }; - serai_env::debug!( - "Notable block block_number={block_number}: new session created \ - start_block_number={start_block}, sets={sets:?}, keys={{ {keys_hex} }}, \ + serai_env::trace!( + "{block_number}: Notable block block_number={block_number}: new session created \ + start_block_number={start_block}, sets={sets:?}, \ stakes={stakes:?}, total_stake={total_stake}", start_block = next_global_session_info.start_block_number, sets = next_global_session_info.sets, - keys_hex = next_global_session_info - .keys - .iter() - .map(|(net, key)| format!("{net:?}: 0x{}", hex::encode(key.0))) - .collect::>() - .join(", "), stakes = next_global_session_info.stakes, ); @@ -319,9 +306,9 @@ impl ContinuallyRan for CosignIntendTask { // If there isn't anyone available to cosign this block, meaning it'll never be cosigned, // we flag it as not having any events requiring cosigning so we don't attempt to // sign/require a cosign for it - if global_session_for_this_block.is_none() { - serai_env::debug!( - "no previous global session available to cosign, has_events = HasEvents::No" + if (has_events != HasEvents::No) && global_session_for_this_block.is_none() { + serai_env::trace!( + "{block_number}: no previous global session available to cosign, has_events = HasEvents::No" ); has_events = HasEvents::No; } @@ -341,7 +328,7 @@ impl ContinuallyRan for CosignIntendTask { // Tell each set of their expectation to cosign this block for set in ending_global_session_info.sets { serai_env::prod_info!( - "set will cosign {has_events:?} block: set={set:?}, block_number={block_number}" + "{block_number}: set will cosign {has_events:?} block: set={set:?}, block_number={block_number}" ); IntendedCosigns::send( @@ -359,7 +346,7 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - serai_env::debug!( + serai_env::trace!( "finished iterating block_number={block_number}: has_events={has_events:?}" ); diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 79ea2994f..19103a555 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -4,11 +4,7 @@ #![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] use core::{fmt::Debug, future::Future}; -use std::{ - collections::HashMap, - sync::Arc, - time::{Duration, Instant}, -}; +use std::{collections::HashMap, sync::Arc, time::Instant}; use serai_client_serai::Serai; @@ -38,13 +34,6 @@ use delay::LatestCosignedBlockNumber; /// Test helpers and fixtures. pub mod tests; -#[cfg(not(any(test)))] -/// The interval at which the cosigning loop runs. -pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); -#[cfg(any(test))] -/// The interval at which the cosigning loop runs. -pub const COSIGN_LOOP_INTERVAL: Duration = Duration::from_millis(10); - /// A 'global session', defined as all validator sets used for cosigning at a given moment. /// /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign @@ -130,7 +119,7 @@ create_db! { } /// An object usable to request notable cosigns for a block. -pub trait RequestNotableCosigns: 'static + Send { +pub trait RequestNotableCosigns: 'static + Send + Sync { /// The error type which may be encountered when requesting notable cosigns. type Error: Debug; @@ -185,7 +174,6 @@ impl IntakeCosignError { /// The interface to manage cosigning with. pub struct Cosigning { db: D, - _task_handles: Vec, } impl Cosigning { #[cfg(test)] @@ -193,20 +181,24 @@ impl Cosigning { /// /// This does not spawn any background tasks; use `Cosigning::spawn` for the full service. pub fn new(db: D) -> Self { - Self { db, _task_handles: vec![] } + Self { db } } /// Spawn the tasks to intend and evaluate cosigns. /// /// The database specified must only be used with a singular instance of the Serai network, and /// only used once at any given time. - pub fn spawn( + pub fn spawn( db: D, serai: Arc, request: R, tasks_to_run_upon_finalizing_blocks: Vec, ) -> Self { let (intend_task, intend_task_handle) = Task::new(); + // Forget the intend task handle, as dropping the handle would stop the task + // keeps all cosign tasks running in the background + core::mem::forget(intend_task_handle); + let (evaluator_task, evaluator_task_handle) = Task::new(); let (delay_task, delay_task_handle) = Task::new(); tokio::spawn( @@ -225,16 +217,17 @@ impl Cosigning { (delay::CosignDelayTask { db: db.clone() }) .continually_run(delay_task, tasks_to_run_upon_finalizing_blocks), ); - Self { db, _task_handles: vec![intend_task_handle, evaluator_task_handle, delay_task_handle] } + + Self { db } } /// The latest acknowledged block number. - pub fn latest_finalized_block(getter: &impl Get) -> Result { + pub fn latest_cosigned_block_number(getter: &impl Get) -> Result, Faulted> { if FaultedSession::get(getter).is_some() { Err(Faulted)?; } - Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) + Ok(LatestCosignedBlockNumber::get(getter)) } /// Fetch a cosigned Substrate block's hash by its block number. @@ -242,7 +235,10 @@ impl Cosigning { getter: &impl Get, block_number: u64, ) -> Result, Faulted> { - if block_number == 0 || block_number > Self::latest_finalized_block(getter)? { + let Some(latest) = Self::latest_cosigned_block_number(getter)? else { + return Ok(None); + }; + if block_number > latest { return Ok(None); } diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index b6dfcdad4..3c78303d5 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -19,7 +19,6 @@ use serai_cosign_types::tests::sign_cosign; use serai_task::{Task, ContinuallyRan}; use serai_client_serai::abi::primitives::{ - BlockHash, crypto::Public, network_id::ExternalNetworkId, validator_sets::{ExternalValidatorSet, Session}, @@ -180,9 +179,9 @@ async fn spawn_end_to_end() { // Just started: results are empty { assert!(cosigning.cosigns_to_rebroadcast().is_empty()); - let latest = Cosigning::::latest_finalized_block(&db); + let latest = Cosigning::::latest_cosigned_block_number(&db); assert!(latest.is_ok()); - assert_eq!(latest.unwrap(), 0); + assert_eq!(latest.unwrap(), None); } // Run block production and pipeline polling concurrently @@ -198,8 +197,8 @@ async fn spawn_end_to_end() { // Poll until the pipeline has processed all blocks async { loop { - let latest = Cosigning::::latest_finalized_block(&db); - if latest.map(|n| n >= total_blocks).unwrap_or(false) { + let latest = Cosigning::::latest_cosigned_block_number(&db); + if latest.ok().flatten().is_some_and(|n| n >= total_blocks) { break; } tokio::time::sleep(Duration::from_millis(100)).await; @@ -207,8 +206,8 @@ async fn spawn_end_to_end() { } ); - let latest = Cosigning::::latest_finalized_block(&db).unwrap(); - assert!(latest == total_blocks); + let latest = Cosigning::::latest_cosigned_block_number(&db).unwrap(); + assert!(latest == Some(total_blocks)); // Verify the dependent task was triggered by the pipeline assert!(triggered.load(Ordering::SeqCst)); @@ -221,7 +220,7 @@ fn latest_finalized_block() { // Defaults to zero { let db = MemDb::new(); - assert_eq!(Cosigning::::latest_finalized_block(&db).unwrap(), 0); + assert_eq!(Cosigning::::latest_cosigned_block_number(&db).unwrap(), None); } // Errors when faulted session exists @@ -230,7 +229,7 @@ fn latest_finalized_block() { let mut txn = db.txn(); FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); txn.commit(); - assert!(matches!(Cosigning::::latest_finalized_block(&db), Err(Faulted))); + assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); } // Returns stored value @@ -240,7 +239,10 @@ fn latest_finalized_block() { let latest_finalized_block = OsRng.next_u64(); LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); txn.commit(); - assert_eq!(Cosigning::::latest_finalized_block(&db).unwrap(), latest_finalized_block); + assert_eq!( + Cosigning::::latest_cosigned_block_number(&db).unwrap(), + Some(latest_finalized_block) + ); } } @@ -320,9 +322,18 @@ fn notable_cosigns() { let notable = Cosigning::::notable_cosigns(&db, id); assert_eq!(notable.len(), 1); - assert_eq!(notable[0].cosign.block_number, block_number); - assert_eq!(notable[0].cosign.block_hash, block_hash); - assert_eq!(notable[0].cosign.cosigner, network); + + let SignedCosign { cosign, .. } = ¬able[0]; + let Cosign { + global_session, + block_number: cosign_block_number, + block_hash: cosign_block_hash, + cosigner, + } = cosign; + assert_eq!(global_session, &id); + assert_eq!(cosign_block_number, &block_number); + assert_eq!(cosign_block_hash, &block_hash); + assert_eq!(cosigner, &network); } } @@ -464,7 +475,7 @@ mod intake_cosign { global_session: random_global_session(&mut OsRng), block_number: OsRng.next_u64(), block_hash: random_block_hash(&mut OsRng), - cosigner: ExternalNetworkId::Bitcoin, + cosigner: random_validator_set(&mut OsRng).network, }; let signed = sign_cosign(cosign, &keypair); @@ -485,24 +496,33 @@ mod intake_cosign { let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); + let base_block = OsRng.next_u64() / 2; let block_hash_1 = random_block_hash(&mut OsRng); let block_hash_2 = random_block_hash(&mut OsRng); { let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &block_hash_1); - SubstrateBlockHash::set(&mut txn, 2, &block_hash_2); + SubstrateBlockHash::set(&mut txn, base_block, &block_hash_1); + SubstrateBlockHash::set(&mut txn, base_block + 1, &block_hash_2); txn.commit(); } - let first_cosign = - Cosign { global_session: id, block_number: 2, block_hash: block_hash_2, cosigner: network }; + let first_cosign = Cosign { + global_session: id, + block_number: base_block + 1, + block_hash: block_hash_2, + cosigner: network, + }; let first_signed = sign_cosign(first_cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); cosigning.intake_cosign(&first_signed).unwrap(); - let stale_cosign = - Cosign { global_session: id, block_number: 1, block_hash: block_hash_1, cosigner: network }; + let stale_cosign = Cosign { + global_session: id, + block_number: base_block, + block_hash: block_hash_1, + cosigner: network, + }; let stale_signed = sign_cosign(stale_cosign, &keypair); assert!(matches!( @@ -529,7 +549,7 @@ mod intake_cosign { global_session: random_global_session(&mut OsRng), block_number, block_hash, - cosigner: ExternalNetworkId::Bitcoin, + cosigner: random_validator_set(&mut OsRng).network, }; let signed = sign_cosign(cosign, &keypair); @@ -545,7 +565,7 @@ mod intake_cosign { serai_env::init_logger(); let (mut session, keypair) = random_test_session(); let network = session.sets[0].network; - session.start_block_number = 10; + session.start_block_number = OsRng.next_u64(); let id = session.id(); let block_hash = random_block_hash(&mut OsRng); @@ -554,12 +574,17 @@ mod intake_cosign { let mut txn = db.txn(); GlobalSessions::set(&mut txn, id, &session.to_global()); CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestCosignedBlockNumber::set(&mut txn, &10u64); - SubstrateBlockHash::set(&mut txn, 5, &block_hash); + LatestCosignedBlockNumber::set(&mut txn, &session.start_block_number); + SubstrateBlockHash::set(&mut txn, session.start_block_number - 1, &block_hash); txn.commit(); } - let cosign = Cosign { global_session: id, block_number: 5, block_hash, cosigner: network }; + let cosign = Cosign { + global_session: id, + block_number: session.start_block_number - 1, + block_hash, + cosigner: network, + }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db); @@ -580,14 +605,15 @@ mod intake_cosign { seed_minimal_state(&mut db, &session); let block_hash = random_block_hash(&mut OsRng); + let block_number = OsRng.next_u64(); { let mut txn = db.txn(); - GlobalSessionsLastBlock::set(&mut txn, id, &5u64); - SubstrateBlockHash::set(&mut txn, 10, &block_hash); + GlobalSessionsLastBlock::set(&mut txn, id, &(block_number - 1)); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); txn.commit(); } - let cosign = Cosign { global_session: id, block_number: 10, block_hash, cosigner: network }; + let cosign = Cosign { global_session: id, block_number, block_hash, cosigner: network }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db); @@ -628,7 +654,7 @@ mod intake_cosign { serai_env::init_logger(); let (mut session, keypair) = random_test_session(); let network = session.sets[0].network; - session.start_block_number = 10; + session.start_block_number = OsRng.next_u64(); let id = session.id(); let block_hash = random_block_hash(&mut OsRng); @@ -637,12 +663,17 @@ mod intake_cosign { let mut txn = db.txn(); GlobalSessions::set(&mut txn, id, &session.to_global()); CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); - LatestCosignedBlockNumber::set(&mut txn, &5u64); - SubstrateBlockHash::set(&mut txn, 10, &block_hash); + LatestCosignedBlockNumber::set(&mut txn, &(session.start_block_number - 2)); + SubstrateBlockHash::set(&mut txn, session.start_block_number, &block_hash); txn.commit(); } - let cosign = Cosign { global_session: id, block_number: 10, block_hash, cosigner: network }; + let cosign = Cosign { + global_session: id, + block_number: session.start_block_number, + block_hash, + cosigner: network, + }; let signed = sign_cosign(cosign, &keypair); let mut cosigning = Cosigning::new(db); @@ -755,30 +786,40 @@ mod intake_cosign { let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); + let block_number1 = OsRng.next_u64(); let block_hash_1 = random_block_hash(&mut OsRng); + let block_number2 = block_number1 + 1; let block_hash_2 = random_block_hash(&mut OsRng); { let mut txn = db.txn(); - SubstrateBlockHash::set(&mut txn, 1, &block_hash_1); - SubstrateBlockHash::set(&mut txn, 2, &block_hash_2); + SubstrateBlockHash::set(&mut txn, block_number1, &block_hash_1); + SubstrateBlockHash::set(&mut txn, block_number2, &block_hash_2); txn.commit(); } - let first_cosign = - Cosign { global_session: id, block_number: 1, block_hash: block_hash_1, cosigner: network }; + let first_cosign = Cosign { + global_session: id, + block_number: block_number1, + block_hash: block_hash_1, + cosigner: network, + }; let first_signed = sign_cosign(first_cosign, &keypair); let mut cosigning = Cosigning::new(db.clone()); cosigning.intake_cosign(&first_signed).unwrap(); - let newer_cosign = - Cosign { global_session: id, block_number: 2, block_hash: block_hash_2, cosigner: network }; + let newer_cosign = Cosign { + global_session: id, + block_number: block_number2, + block_hash: block_hash_2, + cosigner: network, + }; let newer_signed = sign_cosign(newer_cosign, &keypair); assert!(cosigning.intake_cosign(&newer_signed).is_ok()); let latest = NetworksLatestCosignedBlock::get(&db, id, network).unwrap(); - assert_eq!(latest.cosign.block_number, 2); + assert_eq!(latest.cosign.block_number, block_number2); } #[test] @@ -791,7 +832,7 @@ mod intake_cosign { let mut db = MemDb::new(); seed_minimal_state(&mut db, &session); - let last_block = 5u64; + let last_block = u64::from(OsRng.next_u32() % 100) + 1; // any from 1 to 100 let mut block_hashes = Vec::new(); { let mut txn = db.txn(); @@ -869,10 +910,11 @@ mod intake_cosign { #[test] fn records_fault_below_threshold() { serai_env::init_logger(); - let network1 = ExternalNetworkId::Bitcoin; - let network2 = ExternalNetworkId::Ethereum; - let set1 = ExternalValidatorSet { network: network1, session: Session(0) }; - let set2 = ExternalValidatorSet { network: network2, session: Session(0) }; + let set1 = random_validator_set(&mut OsRng); + let network1 = set1.network; + // Ensure we pick a distinct second network + let network2 = ExternalNetworkId::all().find(|n| *n != network1).unwrap(); + let set2 = ExternalValidatorSet { network: network2, session: Session(OsRng.next_u32()) }; let (keypair1, public1) = random_keypair(&mut OsRng); let (_, public2) = random_keypair(&mut OsRng); @@ -883,15 +925,21 @@ mod intake_cosign { keys.insert(network1, public1); keys.insert(network2, public2); - stakes.insert(network1, 10); - stakes.insert(network2, 90); + // stake1 must be below the 17% threshold: stake1 < (total_stake * 17) / 100 + let total_stake = OsRng.gen_range(100u64 .. 10_000); + let max_below_threshold = (total_stake * 17) / 100; + let stake1 = OsRng.gen_range(1 .. max_below_threshold.max(2)); + let stake2 = total_stake - stake1; + + stakes.insert(network1, stake1); + stakes.insert(network2, stake2); let session = TestGlobalSession { - start_block_number: 1, + start_block_number: u64::from(set1.session.0) + 1, sets: vec![set1, set2], keys, stakes, - total_stake: 100, + total_stake, }; let id = session.id(); diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 33a48657d..c9af879ff 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -22,7 +22,9 @@ use serai_client_serai::{ validator_sets, }, }; -use serai_primitives::test_helpers::{random_external_address, random_external_key, random_keypair}; +use serai_primitives::test_helpers::{ + random_external_address, random_external_key, random_keypair, random_serai_address, +}; use crate::{intend::*, tests::*, *}; @@ -46,7 +48,7 @@ fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) validator, network, amount: Amount(amount), - timeline: validator_sets::DeallocationTimeline::Immediate, + timeline: serai_abi::primitives::validator_sets::DeallocationTimeline::Immediate, }) } @@ -388,19 +390,12 @@ pub(super) struct EventFuzzer { impl EventFuzzer { pub(super) fn new() -> Self { - let mut seed = [0u8; 32]; - OsRng.fill_bytes(&mut seed); - + // OsRng.next_u64() % 17 = 0..16 + // _ + 4 = 4..20 validators per test let num_validators = usize::try_from((OsRng.next_u64() % 17) + 4).unwrap(); - let validators: Vec = (0 .. num_validators) - .map(|i| { - let mut bytes = [0u8; 32]; - bytes[0 .. 8].copy_from_slice(&u64::try_from(i).unwrap().to_le_bytes()); - bytes[8 .. 16].copy_from_slice(&seed[0 .. 8]); - SeraiAddress(bytes) - }) - .collect(); + let validators: Vec = + (0 .. num_validators).map(|_| random_serai_address(&mut OsRng)).collect(); let networks: Vec = NetworkId::all().collect(); @@ -610,6 +605,44 @@ impl EventFuzzer { } } +#[tokio::test] +async fn deallocating_zero_is_a_noop() { + serai_env::init_logger(); + let (serai, task_test) = setup_mock_test().await; + + let validator = random_serai_address(&mut OsRng); + let network = NetworkId::External(ExternalNetworkId::Bitcoin); + + { + // Block 0: allocate 0 stake to the validator + serai.make_block(0, vec![vec![allocation_event(validator, network, 0)]]).await; + // Block 1: deallocate 0 from the same validator + serai.make_block(1, vec![vec![deallocation_event(validator, network, 0)]]).await; + + let mut task = task_test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + + // Verify it works and the stake is unchanged after the 0-deallocation + let stake = Stakes::get(&task_test.db, ExternalNetworkId::Bitcoin, validator); + assert_eq!(stake, Some(Amount(0)), "stake should be unchanged after deallocating 0"); + } + + { + let amount = OsRng.next_u64(); + // Block 2: allocate stake to the validator + serai.make_block(2, vec![vec![allocation_event(validator, network, amount)]]).await; + // Block 3: deallocate 0 from the same validator + serai.make_block(3, vec![vec![deallocation_event(validator, network, 0)]]).await; + + let mut task = task_test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + + // Verify the 0-deallocation works and the stake is unchanged after it + let stake = Stakes::get(&task_test.db, ExternalNetworkId::Bitcoin, validator); + assert_eq!(stake, Some(Amount(amount)), "stake should be unchanged after deallocating 0"); + } +} + #[tokio::test] async fn fuzzed_event_processing() { serai_env::init_logger(); diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 8cff8a7e4..b11ffe664 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -75,11 +75,7 @@ async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { (shim_serai, serai) } -pub(crate) fn random_global_session(rng: &mut R) -> [u8; 32] { - let mut id = [0u8; 32]; - rng.fill_bytes(&mut id); - id -} +pub(crate) use serai_cosign_types::tests::random_global_session; /// For whe external validator set does not alter or affect the behavior of the functions being tested /// this can be used just as a default value any time diff --git a/coordinator/cosign/types/Cargo.toml b/coordinator/cosign/types/Cargo.toml index 30d3a5925..d52fd0082 100644 --- a/coordinator/cosign/types/Cargo.toml +++ b/coordinator/cosign/types/Cargo.toml @@ -18,13 +18,15 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [features] -test-helpers = ["serai-primitives/test-helpers"] +test-helpers = ["rand_core", "serai-primitives/test-helpers"] [dependencies] schnorrkel = { version = "0.11", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +rand_core = { version = "0.6", default-features = false, features = ["std"], optional = true } + serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] } [dev-dependencies] diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 6125f3e7c..befeb9ca9 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -9,35 +9,58 @@ pub fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosig } #[cfg(test)] -use rand_core::OsRng; +use rand_core::{OsRng, RngCore}; #[cfg(test)] -use serai_primitives::test_helpers::random_keypair; +use serai_primitives::test_helpers::{random_block_hash, random_keypair}; #[cfg(test)] -use crate::{BlockHash, CosignIntent, ExternalNetworkId, Public}; +use crate::{CosignIntent, ExternalNetworkId, Public}; + +/// Generate a random global session ID for testing. +#[cfg(any(test, feature = "test-helpers"))] +pub fn random_global_session( + rng: &mut (impl rand_core::RngCore + rand_core::CryptoRng), +) -> [u8; 32] { + let mut id = [0u8; 32]; + rng.fill_bytes(&mut id); + id +} + +#[cfg(test)] +fn random_external_network_id( + rng: &mut (impl RngCore + rand_core::CryptoRng), +) -> ExternalNetworkId { + let all: Vec<_> = ExternalNetworkId::all().collect(); + all[(rng.next_u32() as usize) % all.len()] +} #[test] fn cosign_intent_into_cosign() { - let intent = CosignIntent { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - notable: true, - }; - - let cosign = intent.into_cosign(ExternalNetworkId::Bitcoin); - - assert_eq!(cosign.global_session, [1u8; 32]); - assert_eq!(cosign.block_number, 5); - assert_eq!(cosign.block_hash, BlockHash([5u8; 32])); - assert_eq!(cosign.cosigner, ExternalNetworkId::Bitcoin); + let global_session = random_global_session(&mut OsRng); + let block_number = OsRng.next_u64(); + let block_hash = random_block_hash(&mut OsRng); + let notable = OsRng.next_u32() % 2 == 0; + let network = random_external_network_id(&mut OsRng); + + let intent = CosignIntent { global_session, block_number, block_hash, notable }; + let Cosign { + global_session: cosign_global_session, + block_number: cosign_block_number, + block_hash: cosign_block_hash, + cosigner: cosign_cosigner, + } = intent.into_cosign(network); + + assert_eq!(cosign_global_session, global_session); + assert_eq!(cosign_block_number, block_number); + assert_eq!(cosign_block_hash, block_hash); + assert_eq!(cosign_cosigner, network); } #[test] fn deterministic_signature_message() { let cosign = Cosign { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), cosigner: ExternalNetworkId::Bitcoin, }; @@ -51,10 +74,10 @@ fn deterministic_signature_message() { fn signed_cosign_verify_signature_valid() { let (keypair, public) = random_keypair(&mut OsRng); let cosign = Cosign { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), + cosigner: random_external_network_id(&mut OsRng), }; let signed = sign_cosign(cosign, &keypair); @@ -68,10 +91,10 @@ fn signed_cosign_verify_signature_invalid() { let (_, wrong_public) = random_keypair(&mut OsRng); let cosign = Cosign { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), + cosigner: random_external_network_id(&mut OsRng), }; let signed = sign_cosign(cosign, &keypair1); @@ -83,10 +106,10 @@ fn signed_cosign_verify_signature_invalid() { fn signed_cosign_verify_signature_invalid_public_key_bytes() { let (keypair, _) = random_keypair(&mut OsRng); let cosign = Cosign { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), + cosigner: random_external_network_id(&mut OsRng), }; let signed = sign_cosign(cosign, &keypair); @@ -104,10 +127,10 @@ fn signed_cosign_verify_signature_invalid_public_key_bytes() { #[test] fn signed_cosign_verify_signature_invalid_signature_bytes() { let cosign = Cosign { - global_session: [1u8; 32], - block_number: 5, - block_hash: BlockHash([5u8; 32]), - cosigner: ExternalNetworkId::Bitcoin, + global_session: random_global_session(&mut OsRng), + block_number: OsRng.next_u64(), + block_hash: random_block_hash(&mut OsRng), + cosigner: random_external_network_id(&mut OsRng), }; let invalid_sig_bytes = [255u8; 64]; diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 9fc547f34..87376d5b3 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -30,7 +30,7 @@ use message_queue::{Service, client::MessageQueue}; use serai_task::{Task, TaskHandle, ContinuallyRan as _}; -use serai_cosign::{COSIGN_LOOP_INTERVAL, Faulted, SignedCosign, Cosigning}; +use serai_cosign::{Faulted, SignedCosign, Cosigning}; use serai_coordinator_substrate::{ CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches, PublishBatchTask, SlashReports, PublishSlashReportTask, @@ -88,11 +88,13 @@ fn spawn_cosigning( ) { let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning); tokio::spawn(async move { + const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); + let last_cosign_rebroadcast = Instant::now(); loop { // Intake our own cosigns - match Cosigning::::latest_finalized_block(&db) { - Ok(latest_acknowledged_block) => { + match Cosigning::::latest_cosigned_block_number(&db) { + Ok(Some(latest_acknowledged_block)) => { let mut txn = db.txn(); // The cosigns we prior tried to intake yet failed to let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]); @@ -124,6 +126,7 @@ fn spawn_cosigning( txn.commit(); } + Ok(None) => {} Err(Faulted) => { // We don't panic here as the following code rebroadcasts our cosigns which is // necessary to inform other coordinators of the faulty cosigns diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs index 25a54344f..b53ebd42b 100644 --- a/coordinator/src/tributary.rs +++ b/coordinator/src/tributary.rs @@ -103,14 +103,14 @@ impl ContinuallyRan pending_notable_cosign = cosign.notable; // If we (Serai) haven't cosigned this block, break as this is still pending - let latest = match Cosigning::::latest_finalized_block(&txn) { + let latest = match Cosigning::::latest_cosigned_block_number(&txn) { Ok(latest) => latest, Err(Faulted) => { log::error!("cosigning faulted"); Err("cosigning faulted")? } }; - if latest < cosign.block_number { + if latest < Some(cosign.block_number) { break; } diff --git a/coordinator/substrate/src/canonical.rs b/coordinator/substrate/src/canonical.rs index 1d70b7048..9d9e24f48 100644 --- a/coordinator/substrate/src/canonical.rs +++ b/coordinator/substrate/src/canonical.rs @@ -42,9 +42,12 @@ impl ContinuallyRan for CanonicalEventStream { fn run_iteration(&mut self) -> impl Send + Future> { async move { - let next_block = NextBlock::get(&self.db).unwrap_or(0); - let latest_finalized_block = - Cosigning::::latest_finalized_block(&self.db).map_err(|e| format!("{e:?}"))?; + let next_block = NextBlock::get(&self.db).unwrap_or(1); + let Some(latest_finalized_block) = + Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))? + else { + return Ok(false); + }; // These are all the events which generate canonical messages struct CanonicalEvents { diff --git a/coordinator/substrate/src/ephemeral.rs b/coordinator/substrate/src/ephemeral.rs index 32a969f23..a7df64483 100644 --- a/coordinator/substrate/src/ephemeral.rs +++ b/coordinator/substrate/src/ephemeral.rs @@ -53,8 +53,11 @@ impl ContinuallyRan for EphemeralEventStream { fn run_iteration(&mut self) -> impl Send + Future> { async move { let next_block = NextBlock::get(&self.db).unwrap_or(0); - let latest_finalized_block = - Cosigning::::latest_finalized_block(&self.db).map_err(|e| format!("{e:?}"))?; + let Some(latest_finalized_block) = + Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))? + else { + return Ok(false); + }; // These are all the events which generate canonical messages struct EphemeralEvents { diff --git a/substrate/abi/src/modules/validator_sets.rs b/substrate/abi/src/modules/validator_sets.rs index 028b79ba3..75bb13607 100644 --- a/substrate/abi/src/modules/validator_sets.rs +++ b/substrate/abi/src/modules/validator_sets.rs @@ -12,8 +12,6 @@ use serai_primitives::{ validator_sets::*, }; -pub use serai_primitives::validator_sets::DeallocationTimeline; - /// The address used by the validator sets pallet. pub fn address() -> SeraiAddress { SeraiAddress::system(borsh::to_vec(b"ValidatorSets").unwrap()) diff --git a/tests/shim-rpc/Cargo.toml b/tests/shim-rpc/Cargo.toml index 613cb6971..fe263efe0 100644 --- a/tests/shim-rpc/Cargo.toml +++ b/tests/shim-rpc/Cargo.toml @@ -2,7 +2,7 @@ name = "serai-shim-rpc" version = "0.1.0" description = "A bespoke shim RPC node for testing Serai RPC clients without a real chain" -license = "MIT" +license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/shim-rpc" authors = ["Luke Parker ", "rafael_xmr "] edition = "2021" diff --git a/tests/shim-rpc/LICENSE b/tests/shim-rpc/LICENSE index f995f1e78..2334e883e 100644 --- a/tests/shim-rpc/LICENSE +++ b/tests/shim-rpc/LICENSE @@ -1,21 +1,15 @@ -MIT License +AGPL-3.0-only license -Copyright (c) 2026 Serai +Copyright (c) 2026 Serai Contributors -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/tests/shim-rpc/README.md b/tests/shim-rpc/README.md index a9dd06ff2..6c8a555c4 100644 --- a/tests/shim-rpc/README.md +++ b/tests/shim-rpc/README.md @@ -22,6 +22,4 @@ let serai = Serai::new(shim_serai.url()).unwrap(); let latest = serai.latest_finalized_block_number().await.unwrap(); assert_eq!(latest, 1); - -shim_serai.stop(); ``` diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index 173b3ba3f..e69aadb71 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -155,9 +155,4 @@ impl SeraiShimRpc { pub async fn clear_failure_rate(&self) { self.state.write().await.errors.failure_rate = 0; } - - /// Stop the shim RPC node server. - pub fn stop(&self) { - self.handle.stop().expect("failed to stop shim RPC node"); - } } diff --git a/tests/shim-rpc/tests/integration.rs b/tests/shim-rpc/tests/integration.rs index ce34888b1..c77c71574 100644 --- a/tests/shim-rpc/tests/integration.rs +++ b/tests/shim-rpc/tests/integration.rs @@ -58,7 +58,6 @@ async fn test_basic_block_and_number() { let none = client.block_by_number(999).await.unwrap(); assert!(none.is_none()); - sim.stop(); } #[tokio::test] @@ -79,7 +78,6 @@ async fn test_block_by_hash() { let finalized = client.finalized(hash).await.unwrap(); assert!(finalized); - sim.stop(); } #[tokio::test] @@ -109,7 +107,6 @@ async fn test_events_round_trip() { // Verify second event is a SetDecided assert!(matches!(vs_events[1], vs_mod::Event::SetDecided { .. })); - sim.stop(); } #[tokio::test] @@ -138,7 +135,6 @@ async fn test_dynamic_block_addition() { let latest = client.latest_finalized_block_number().await.unwrap(); assert_eq!(latest, 2); - sim.stop(); } #[tokio::test] @@ -167,7 +163,6 @@ async fn test_error_injection() { let latest = client.latest_finalized_block_number().await.unwrap(); assert_eq!(latest, 1); - sim.stop(); } #[tokio::test] @@ -191,7 +186,6 @@ async fn test_clear_all_errors() { assert_eq!(client.latest_finalized_block_number().await.unwrap(), 1); assert!(client.block_by_number(1).await.unwrap().is_some()); - sim.stop(); } #[tokio::test] @@ -218,7 +212,6 @@ async fn test_builds_upon_chain() { assert_ne!(block1.header.hash(), block2.header.hash()); assert_ne!(block2.header.hash(), block3.header.hash()); - sim.stop(); } #[tokio::test] @@ -243,7 +236,6 @@ async fn test_publish_transaction() { assert_eq!(state.published_transactions[0], vec![0xDE, 0xAD]); } - sim.stop(); } #[tokio::test] @@ -271,5 +263,4 @@ async fn test_validator_sets_state() { serai_state.current_stake(NetworkId::External(ExternalNetworkId::Bitcoin)).await.unwrap(); assert_eq!(stake, Some(Amount(1_000_000))); - sim.stop(); } diff --git a/tests/task/Cargo.toml b/tests/task/Cargo.toml index fbf99b71f..034ec086a 100644 --- a/tests/task/Cargo.toml +++ b/tests/task/Cargo.toml @@ -2,7 +2,7 @@ name = "serai-test-task" version = "0.1.0" description = "Common test utilities for serai-task ContinuallyRan tasks" -license = "MIT" +license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/task" authors = ["Luke Parker ", "rafael_xmr "] edition = "2021" diff --git a/tests/task/LICENSE b/tests/task/LICENSE index f995f1e78..2334e883e 100644 --- a/tests/task/LICENSE +++ b/tests/task/LICENSE @@ -1,21 +1,15 @@ -MIT License +AGPL-3.0-only license -Copyright (c) 2026 Serai +Copyright (c) 2026 Serai Contributors -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . From 38141224566c0d92f235ed4d95a3e89b76910b2a Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 23 Mar 2026 15:58:39 -0300 Subject: [PATCH 39/71] feat(coordinator/cosign): more review comment changes --- Cargo.toml | 1 + common/env/src/lib.rs | 2 - coordinator/cosign/src/evaluator.rs | 10 +- coordinator/cosign/src/tests/intend.rs | 8 +- coordinator/cosign/types/src/tests/mod.rs | 205 ++++++++++++---------- tests/shim-rpc/src/lib.rs | 4 +- 6 files changed, 128 insertions(+), 102 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1e176114c..0b718e20a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,6 +227,7 @@ k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" } [workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage)'] } # https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html ambiguous_negative_literals = "warn" closure_returning_async_block = "warn" diff --git a/common/env/src/lib.rs b/common/env/src/lib.rs index 3f17a1180..c72494a84 100644 --- a/common/env/src/lib.rs +++ b/common/env/src/lib.rs @@ -1,8 +1,6 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] -use std::str::FromStr; - /// Re-export of `log` for direct access (e.g. `serai_env::log::Level`). pub use log; diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 3926c198d..8c9445522 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -15,7 +15,8 @@ pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_mins(1); #[cfg(any(test))] pub(crate) const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(6); -const COSIGN_COMMIT_THRESHOLD: u64 = 83; +const COSIGN_COMMIT_THRESHOLD_NUMERATOR: u128 = 83; +const COSIGN_COMMIT_THRESHOLD_DENOMINATOR: u128 = 100; create_db!( SubstrateCosignEvaluator { @@ -100,7 +101,12 @@ fn should_request_cosigns(last_request_for_cosigns: &mut Instant) -> bool { //// Calculate the minimum threshold required for cosigning pub(crate) fn cosign_threshold(total_stake: u64) -> u64 { - ((total_stake * COSIGN_COMMIT_THRESHOLD) / 100) + 1 + u64::try_from( + (u128::from(total_stake) * COSIGN_COMMIT_THRESHOLD_NUMERATOR) / + COSIGN_COMMIT_THRESHOLD_DENOMINATOR, + ) + .expect("threshold < 1") + + 1 } /// Evaluate non-notable cosigns, returning (weight_cosigned, lowest_common_block). diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index c9af879ff..77ff7ab46 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, sync::Arc}; use rand_core::{OsRng, RngCore}; use serai_db::MemDb; +use serai_task::ContinuallyRan; use serai_shim_rpc::SeraiShimRpc; @@ -326,6 +327,7 @@ mod errors { } #[tokio::test] + #[should_panic(expected = "validator set from Event::SetDecided was empty")] async fn errors_if_set_decided_has_empty_validators() { serai_env::init_logger(); let (serai, task_test) = setup_mock_test().await; @@ -342,11 +344,7 @@ mod errors { serai.make_block(1, vec![vec![empty_set_decided]]).await; let mut task = task_test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "validator set from Event::SetDecided was empty") - .await; - - // Block 0 committed, block 1 failed mid-processing - assert_eq!(ScanCosignFrom::get(&task_test.db), Some(1)); + task.run_iteration().await; } #[tokio::test] diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index befeb9ca9..54cd6f22c 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -15,14 +15,20 @@ use serai_primitives::test_helpers::{random_block_hash, random_keypair}; #[cfg(test)] use crate::{CosignIntent, ExternalNetworkId, Public}; +/// Generate a random 32-byte array for testing. +#[cfg(any(test, feature = "test-helpers"))] +pub fn random_bytes_32(rng: &mut (impl rand_core::RngCore + rand_core::CryptoRng)) -> [u8; 32] { + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes); + bytes +} + /// Generate a random global session ID for testing. #[cfg(any(test, feature = "test-helpers"))] pub fn random_global_session( rng: &mut (impl rand_core::RngCore + rand_core::CryptoRng), ) -> [u8; 32] { - let mut id = [0u8; 32]; - rng.fill_bytes(&mut id); - id + random_bytes_32(rng) } #[cfg(test)] @@ -33,115 +39,132 @@ fn random_external_network_id( all[(rng.next_u32() as usize) % all.len()] } +#[cfg(test)] +fn random_cosign(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> Cosign { + Cosign { + global_session: random_global_session(rng), + block_number: rng.next_u64(), + block_hash: random_block_hash(rng), + cosigner: random_external_network_id(rng), + } +} + +#[cfg(test)] +fn random_cosign_intent(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> CosignIntent { + CosignIntent { + global_session: random_global_session(rng), + block_number: rng.next_u64(), + block_hash: random_block_hash(rng), + notable: rng.next_u32() % 2 == 0, + } +} + #[test] fn cosign_intent_into_cosign() { - let global_session = random_global_session(&mut OsRng); - let block_number = OsRng.next_u64(); - let block_hash = random_block_hash(&mut OsRng); - let notable = OsRng.next_u32() % 2 == 0; + let intent = random_cosign_intent(&mut OsRng); let network = random_external_network_id(&mut OsRng); + let Cosign { global_session, block_number, block_hash, cosigner } = intent.into_cosign(network); - let intent = CosignIntent { global_session, block_number, block_hash, notable }; - let Cosign { - global_session: cosign_global_session, - block_number: cosign_block_number, - block_hash: cosign_block_hash, - cosigner: cosign_cosigner, - } = intent.into_cosign(network); - - assert_eq!(cosign_global_session, global_session); - assert_eq!(cosign_block_number, block_number); - assert_eq!(cosign_block_hash, block_hash); - assert_eq!(cosign_cosigner, network); + assert_eq!(intent.global_session, global_session); + assert_eq!(intent.block_number, block_number); + assert_eq!(intent.block_hash, block_hash); + assert_eq!(cosigner, network); } #[test] -fn deterministic_signature_message() { - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: ExternalNetworkId::Bitcoin, - }; +fn deterministic_and_comprehensive_signature_message() { + let cosign = random_cosign(&mut OsRng); + let Cosign { global_session, block_number, block_hash, cosigner } = cosign; let msg1 = cosign.signature_message(); let msg2 = cosign.signature_message(); + // Deterministic assert_eq!(msg1, msg2, "signature_message should be deterministic"); -} -#[test] -fn signed_cosign_verify_signature_valid() { - let (keypair, public) = random_keypair(&mut OsRng); - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: random_external_network_id(&mut OsRng), - }; - - let signed = sign_cosign(cosign, &keypair); - - assert!(signed.verify_signature(public), "valid signature should verify"); + // Comprehensive + { + let mut expected = Vec::new(); + expected.extend(borsh::to_vec(&(global_session, block_number, block_hash, cosigner)).unwrap()); + assert_eq!(msg1, expected, "signature_message should include all fields in Borsh order"); + } + + // Changing any single field must produce a different message + { + let mut other_session = global_session; + other_session[0] ^= 1; + let other = Cosign { global_session: other_session, ..cosign.clone() }; + assert_ne!(msg1, other.signature_message(), "different global_session must change message"); + } + { + let other = Cosign { block_number: block_number.wrapping_add(1), ..cosign.clone() }; + assert_ne!(msg1, other.signature_message(), "different block_number must change message"); + } + { + let mut other_hash = block_hash; + other_hash.0[0] ^= 1; + let other = Cosign { block_hash: other_hash, ..cosign.clone() }; + assert_ne!(msg1, other.signature_message(), "different block_hash must change message"); + } + { + let other_cosigner = ExternalNetworkId::all().find(|n| *n != cosigner).unwrap(); + let other = Cosign { cosigner: other_cosigner, ..cosign.clone() }; + assert_ne!(msg1, other.signature_message(), "different cosigner must change message"); + } } #[test] -fn signed_cosign_verify_signature_invalid() { - let (keypair1, _) = random_keypair(&mut OsRng); - let (_, wrong_public) = random_keypair(&mut OsRng); - - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: random_external_network_id(&mut OsRng), - }; - - let signed = sign_cosign(cosign, &keypair1); +fn signed_cosign_verify_signature() { + { + let (keypair, public) = random_keypair(&mut OsRng); + let cosign = random_cosign(&mut OsRng); + let signed = sign_cosign(cosign, &keypair); + assert!(signed.verify_signature(public), "valid signature should verify"); + } - assert!(!signed.verify_signature(wrong_public), "invalid signature should not verify"); -} + { + let (keypair1, _) = random_keypair(&mut OsRng); + let (_, public2) = random_keypair(&mut OsRng); + let cosign = random_cosign(&mut OsRng); + let signed = sign_cosign(cosign, &keypair1); + assert_eq!(signed.verify_signature(public2), false, "invalid signature should not verify"); + } -#[test] -fn signed_cosign_verify_signature_invalid_public_key_bytes() { - let (keypair, _) = random_keypair(&mut OsRng); - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: random_external_network_id(&mut OsRng), - }; - - let signed = sign_cosign(cosign, &keypair); - - let invalid_bytes = [255u8; 32]; - assert!( - schnorrkel::PublicKey::from_bytes(&invalid_bytes).is_err(), - "test precondition: bytes should be invalid for schnorrkel" - ); - - let invalid_pubkey = Public(invalid_bytes); - assert!(!signed.verify_signature(invalid_pubkey), "invalid public key bytes should return false"); -} + { + let (keypair, _) = random_keypair(&mut OsRng); + let cosign = random_cosign(&mut OsRng); + let signed = sign_cosign(cosign, &keypair); + let invalid_bytes = [255u8; 32]; + assert!( + schnorrkel::PublicKey::from_bytes(&invalid_bytes).is_err(), + "test precondition: bytes should be invalid for schnorrkel" + ); + + let invalid_pubkey = Public(invalid_bytes); + assert_eq!( + signed.verify_signature(invalid_pubkey), + false, + "invalid public key bytes should return false" + ); + } -#[test] -fn signed_cosign_verify_signature_invalid_signature_bytes() { - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: random_external_network_id(&mut OsRng), - }; + { + let cosign = random_cosign(&mut OsRng); - let invalid_sig_bytes = [255u8; 64]; - assert!( - schnorrkel::Signature::from_bytes(&invalid_sig_bytes).is_err(), - "test precondition: signature bytes should be invalid for schnorrkel" - ); + let invalid_sig_bytes = [255u8; 64]; + assert!( + schnorrkel::Signature::from_bytes(&invalid_sig_bytes).is_err(), + "test precondition: signature bytes should be invalid for schnorrkel" + ); - let signed = SignedCosign { cosign, signature: invalid_sig_bytes }; + let signed = SignedCosign { cosign, signature: invalid_sig_bytes }; - let (_, valid_public) = random_keypair(&mut OsRng); + let (_, valid_public) = random_keypair(&mut OsRng); - assert!(!signed.verify_signature(valid_public), "invalid signature bytes should return false"); + assert_eq!( + signed.verify_signature(valid_public), + false, + "invalid signature bytes should return false" + ); + } } diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index e69aadb71..0f86b6a2f 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -20,7 +20,7 @@ use serai_abi::{ pub struct SeraiShimRpc { url: String, state: SharedState, - handle: ServerHandle, + _handle: ServerHandle, } impl SeraiShimRpc { @@ -42,7 +42,7 @@ impl SeraiShimRpc { let addr = server.local_addr().expect("server should have a local address"); let handle = server.start(rpc_module); - Self { url: format!("http://{addr}"), state, handle } + Self { url: format!("http://{addr}"), state, _handle: handle } } /// The HTTP URL this shim is listening on. From 3ca2bc5c0ce0d668ebb56ad483ee01d6966f9226 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 24 Mar 2026 10:46:59 -0300 Subject: [PATCH 40/71] refactor(common/task): move test helpers from external crate --- common/task/Cargo.toml | 3 +++ common/task/src/lib.rs | 4 ++++ .../lib.rs => common/task/src/test_helpers.rs | 10 ++++----- coordinator/cosign/Cargo.toml | 2 +- coordinator/cosign/src/tests/mod.rs | 2 +- tests/task/Cargo.toml | 21 ------------------- tests/task/LICENSE | 15 ------------- 7 files changed, 13 insertions(+), 44 deletions(-) rename tests/task/src/lib.rs => common/task/src/test_helpers.rs (76%) delete mode 100644 tests/task/Cargo.toml delete mode 100644 tests/task/LICENSE diff --git a/common/task/Cargo.toml b/common/task/Cargo.toml index fb458761c..863b44582 100644 --- a/common/task/Cargo.toml +++ b/common/task/Cargo.toml @@ -19,3 +19,6 @@ workspace = true [dependencies] log = { version = "0.4", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] } + +[features] +test-helpers = [] diff --git a/common/task/src/lib.rs b/common/task/src/lib.rs index 9e44acac9..be6022114 100644 --- a/common/task/src/lib.rs +++ b/common/task/src/lib.rs @@ -12,6 +12,10 @@ use tokio::sync::mpsc; mod type_name; +/// Test helpers for asserting [`ContinuallyRan`] task iteration behavior. +#[cfg(any(test, feature = "test-helpers"))] +pub mod test_helpers; + /// A handle for a task. /// /// The task will only stop running once all handles for it are dropped. diff --git a/tests/task/src/lib.rs b/common/task/src/test_helpers.rs similarity index 76% rename from tests/task/src/lib.rs rename to common/task/src/test_helpers.rs index 879bcf6c1..5faa9f0b7 100644 --- a/tests/task/src/lib.rs +++ b/common/task/src/test_helpers.rs @@ -1,8 +1,6 @@ -//! Common test utilities for `serai-task` [`ContinuallyRan`] tasks. +//! Common test utilities for [`ContinuallyRan`] tasks. -#![deny(missing_docs)] - -use serai_task::ContinuallyRan; +use crate::ContinuallyRan; /// Test helpers for asserting task iteration behavior. pub struct TaskTest; @@ -13,13 +11,13 @@ impl TaskTest { task: &mut T, made_progress: bool, ) { - serai_env::log::debug!("running task once: {}", core::any::type_name::()); + log::debug!("running task once: {}", core::any::type_name::()); assert_eq!(task.run_iteration().await.unwrap(), made_progress); } /// Assert that a task iteration fails with an error containing the given string. pub async fn task_runs_and_fails_with(task: &mut T, error: &str) { - serai_env::log::debug!("running task (expecting failure): {}", core::any::type_name::()); + log::debug!("running task (expecting failure): {}", core::any::type_name::()); let err = task.run_iteration().await.unwrap_err(); let err_str = format!("{err:?}"); assert!(err_str.contains(error), "{err_str}"); diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 3af2ec5f5..0ea6f77ab 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -35,7 +35,7 @@ serai-cosign-types = { path = "./types" } serai-cosign-types = { path = "./types", features = ["test-helpers"] } serai-shim-rpc = { path = "../../tests/shim-rpc" } -serai-test-task = { path = "../../tests/task" } +serai-task = { path = "../../common/task", features = ["test-helpers"] } serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false } diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index b11ffe664..ba2cd7f03 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -28,7 +28,7 @@ use serai_client_serai::{ validator_sets::{ExternalValidatorSet, Session}, }, }; -pub(crate) use serai_test_task::{IntoTask, TaskTest}; +pub(crate) use serai_task::test_helpers::{IntoTask, TaskTest}; use crate::RequestNotableCosigns; diff --git a/tests/task/Cargo.toml b/tests/task/Cargo.toml deleted file mode 100644 index 034ec086a..000000000 --- a/tests/task/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "serai-test-task" -version = "0.1.0" -description = "Common test utilities for serai-task ContinuallyRan tasks" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/tests/task" -authors = ["Luke Parker ", "rafael_xmr "] -edition = "2021" -rust-version = "1.85" -publish = false - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -serai-task = { path = "../../common/task" } -serai-env = { path = "../../common/env", version = "0.1.0" } diff --git a/tests/task/LICENSE b/tests/task/LICENSE deleted file mode 100644 index 2334e883e..000000000 --- a/tests/task/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -AGPL-3.0-only license - -Copyright (c) 2026 Serai Contributors - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License Version 3 as -published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . From 06039ee65a750460d9d0c040addb1191769b5ab5 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 24 Mar 2026 15:29:11 -0300 Subject: [PATCH 41/71] refactor(coordinator/cosign): self-reviewing & cleaning up --- coordinator/cosign/Cargo.toml | 1 - coordinator/cosign/src/delay.rs | 9 ------- coordinator/cosign/src/evaluator.rs | 15 ++--------- coordinator/cosign/src/intend.rs | 32 ----------------------- coordinator/cosign/src/lib.rs | 25 +++++++++--------- coordinator/cosign/src/tests/cosigning.rs | 2 +- 6 files changed, 16 insertions(+), 68 deletions(-) diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 0ea6f77ab..62aa4d91c 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -32,7 +32,6 @@ serai-task = { path = "../../common/task", version = "0.1" } serai-cosign-types = { path = "./types" } [dev-dependencies] - serai-cosign-types = { path = "./types", features = ["test-helpers"] } serai-shim-rpc = { path = "../../tests/shim-rpc" } serai-task = { path = "../../common/task", features = ["test-helpers"] } diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs index d8aecb688..79ff6a862 100644 --- a/coordinator/cosign/src/delay.rs +++ b/coordinator/cosign/src/delay.rs @@ -54,11 +54,6 @@ impl ContinuallyRan for CosignDelayTask { break; }; - serai_env::trace!( - "{block_number}: beginning delay: time_evaluated={time_evaluated}, \ - has_events={has_events}, latest_cosigned={latest_cosigned_block_number}", - ); - // Defensive check, not likely to happen but does not allow regressing if block_number <= latest_cosigned_block_number { serai_env::warn!("Attempting to delay on an already cosigned block number ({block_number}, latest={latest_cosigned_block_number})"); @@ -71,9 +66,6 @@ impl ContinuallyRan for CosignDelayTask { if !has_events { LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); - serai_env::trace!( - "{block_number}: LatestCosignedBlockNumber={block_number} (no events, skipped delay)" - ); made_progress = true; continue; } @@ -96,7 +88,6 @@ impl ContinuallyRan for CosignDelayTask { LatestCosignedBlockNumber::set(&mut txn, &block_number); txn.commit(); - serai_env::trace!("{block_number}: LatestCosignedBlockNumber={block_number}"); made_progress = true; } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 8c9445522..6ce5c0ab4 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -155,7 +155,7 @@ async fn ensure_cosigned( block_number: u64, global_session: [u8; 32], last_request_for_cosigns: &mut Instant, - request: &(impl RequestNotableCosigns + Sync), + request: &impl RequestNotableCosigns, label: &str, ) -> Result<(), String> { if weight_cosigned >= cosign_threshold(total_stake) { @@ -186,7 +186,7 @@ pub(crate) struct CosignEvaluatorTask { pub(crate) last_request_for_cosigns: Instant, } -impl ContinuallyRan for CosignEvaluatorTask { +impl ContinuallyRan for CosignEvaluatorTask { #[cfg(test)] const DELAY_BETWEEN_ITERATIONS: u64 = 1; #[cfg(test)] @@ -198,7 +198,6 @@ impl ContinuallyRan for CosignEvaluatorT async move { let mut known_cosign = None; let mut made_progress = false; - loop { let mut txn = self.db.txn(); let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) @@ -212,19 +211,12 @@ impl ContinuallyRan for CosignEvaluatorT // No global session declared yet: this block predates all sessions, skip it // this means only HasEvents:No blocks have been consumed so far None => { - serai_env::trace!( - "{block_number}: No global session declared yet. Ending evaluator." - ); commit_evaluated_block(txn, block_number, false); made_progress = true; continue; } // Session queued but starts after this block, skip it Some(next) if next.1.start_block_number > block_number => { - serai_env::trace!( - "{block_number}: Cannot cosign: GlobalSession is queued for block {}", - next.1.start_block_number - ); commit_evaluated_block(txn, block_number, false); made_progress = true; continue; @@ -234,8 +226,6 @@ impl ContinuallyRan for CosignEvaluatorT } } - serai_env::trace!("{block_number}: beginning evaluator: has_events={:#?}", has_events); - // Fetch the global session information let (global_session, global_session_info) = currently_evaluated_global_session_strict(&mut txn, block_number); @@ -245,7 +235,6 @@ impl ContinuallyRan for CosignEvaluatorT // supermajority of the prior block's validator sets HasEvents::Notable => { let mut weight_cosigned = 0; - for set in global_session_info.sets { // Check if we have the cosign from this set if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs index 2b0c0a2bc..5667119ac 100644 --- a/coordinator/cosign/src/intend.rs +++ b/coordinator/cosign/src/intend.rs @@ -89,10 +89,6 @@ impl ContinuallyRan for CosignIntendTask { // Ephemeral RPC Err: task to re-run and continue trying .map_err(|e| format!("RPC error fetching latest finalized block number: {e}"))?; - serai_env::trace!( - "beginning intend scan: start={start_scan_block_number}, latest={latest_serai_block_number}" - ); - let mut made_progress = false; for block_number in start_scan_block_number ..= latest_serai_block_number { @@ -131,7 +127,6 @@ impl ContinuallyRan for CosignIntendTask { block_number - 1 ))?; } - SubstrateBlockHash::set(&mut txn, block_number, &serai_block_hash); builds_upon.append( serai_client_serai::abi::BLOCK_BRANCH_TAG, @@ -142,8 +137,6 @@ impl ContinuallyRan for CosignIntendTask { ); BuildsUpon::set(&mut txn, &builds_upon); - serai_env::trace!("iterating over block_number={block_number}"); - let mut has_events = HasEvents::No; let vset_events = serai_block_events.validator_sets(); @@ -228,12 +221,6 @@ impl ContinuallyRan for CosignIntendTask { set.network, &Set { session: set.session, key: key_pair.0, stake: Amount(stake) }, ); - } else { - serai_env::trace!( - "{block_number}: skipped session {:?} of {:?} with 0 stake from being selected for cosigns", - set.session, - set.network - ); } } @@ -246,8 +233,6 @@ impl ContinuallyRan for CosignIntendTask { let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); - serai_env::trace!("{block_number}: type of has_events={has_events:?}"); - // If this is notable, it creates a new global session, which we index into the database // now if has_events == HasEvents::Notable { @@ -286,15 +271,6 @@ impl ContinuallyRan for CosignIntendTask { total_stake, }; - serai_env::trace!( - "{block_number}: Notable block block_number={block_number}: new session created \ - start_block_number={start_block}, sets={sets:?}, \ - stakes={stakes:?}, total_stake={total_stake}", - start_block = next_global_session_info.start_block_number, - sets = next_global_session_info.sets, - stakes = next_global_session_info.stakes, - ); - GlobalSessions::set(&mut txn, new_global_session, &next_global_session_info); if let Some(ending_global_session) = global_session_for_this_block { GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number); @@ -307,9 +283,6 @@ impl ContinuallyRan for CosignIntendTask { // we flag it as not having any events requiring cosigning so we don't attempt to // sign/require a cosign for it if (has_events != HasEvents::No) && global_session_for_this_block.is_none() { - serai_env::trace!( - "{block_number}: no previous global session available to cosign, has_events = HasEvents::No" - ); has_events = HasEvents::No; } @@ -346,15 +319,10 @@ impl ContinuallyRan for CosignIntendTask { HasEvents::No => {} } - serai_env::trace!( - "finished iterating block_number={block_number}: has_events={has_events:?}" - ); - // Populate a singular feed with every block's status for the evaluator to work off of BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); // Mark this block as handled, meaning we should scan from the next block moving on ScanCosignFrom::set(&mut txn, &(block_number + 1)); - // Commit for every block that did progress, on failure restarts from the next block txn.commit(); made_progress = true; diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs index 19103a555..b33269b06 100644 --- a/coordinator/cosign/src/lib.rs +++ b/coordinator/cosign/src/lib.rs @@ -4,16 +4,17 @@ #![allow(clippy::std_instead_of_alloc, clippy::std_instead_of_core)] use core::{fmt::Debug, future::Future}; -use std::{collections::HashMap, sync::Arc, time::Instant}; - -use serai_client_serai::Serai; +use std::{sync::Arc, collections::HashMap, time::Instant}; use blake2::{Digest as _, Blake2s256}; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client_serai::abi::primitives::{ - BlockHash, crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet, +use serai_client_serai::{ + abi::primitives::{ + BlockHash, crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet, + }, + Serai, }; use serai_db::*; @@ -192,7 +193,7 @@ impl Cosigning { db: D, serai: Arc, request: R, - tasks_to_run_upon_finalizing_blocks: Vec, + tasks_to_run_upon_cosigning_blocks: Vec, ) -> Self { let (intend_task, intend_task_handle) = Task::new(); // Forget the intend task handle, as dropping the handle would stop the task @@ -203,7 +204,7 @@ impl Cosigning { let (delay_task, delay_task_handle) = Task::new(); tokio::spawn( (intend::CosignIntendTask { db: db.clone(), serai }) - .continually_run(intend_task, vec![evaluator_task_handle.clone()]), + .continually_run(intend_task, vec![evaluator_task_handle]), ); tokio::spawn( (evaluator::CosignEvaluatorTask { @@ -211,17 +212,17 @@ impl Cosigning { request, last_request_for_cosigns: Instant::now(), }) - .continually_run(evaluator_task, vec![delay_task_handle.clone()]), + .continually_run(evaluator_task, vec![delay_task_handle]), ); tokio::spawn( (delay::CosignDelayTask { db: db.clone() }) - .continually_run(delay_task, tasks_to_run_upon_finalizing_blocks), + .continually_run(delay_task, tasks_to_run_upon_cosigning_blocks), ); Self { db } } - /// The latest acknowledged block number. + /// The latest cosigned block number. pub fn latest_cosigned_block_number(getter: &impl Get) -> Result, Faulted> { if FaultedSession::get(getter).is_some() { Err(Faulted)?; @@ -345,10 +346,10 @@ impl Cosigning { if !faulty { // If this is for a future global session, we don't acknowledge this cosign at this time - let latest_cosigned_block = delay::LatestCosignedBlockNumber::get(&txn).unwrap_or(0); + let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); // This global session starts the block *after* its declaration, so we want to check if the // block declaring it was evaluated - if (global_session.start_block_number - 1) > latest_cosigned_block { + if (global_session.start_block_number - 1) > latest_cosigned_block_number { Err(IntakeCosignError::FutureGlobalSession)?; } diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index 3c78303d5..715ca1199 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -207,7 +207,7 @@ async fn spawn_end_to_end() { ); let latest = Cosigning::::latest_cosigned_block_number(&db).unwrap(); - assert!(latest == Some(total_blocks)); + assert_eq!(latest, Some(total_blocks)); // Verify the dependent task was triggered by the pipeline assert!(triggered.load(Ordering::SeqCst)); From a708ed383548a13ddb5c61974b0d421ebe4f4e7f Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 24 Mar 2026 16:03:07 -0300 Subject: [PATCH 42/71] some misc --- coordinator/cosign/src/evaluator.rs | 4 ++++ coordinator/cosign/src/lib.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index 6ce5c0ab4..ecd999df4 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -259,6 +259,8 @@ impl ContinuallyRan for CosignEvaluatorTask ContinuallyRan for CosignEvaluatorTask Cosigning { // If this is for a future global session, we don't acknowledge this cosign at this time let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); // This global session starts the block *after* its declaration, so we want to check if the - // block declaring it was evaluated + // block declaring it was cosigned if (global_session.start_block_number - 1) > latest_cosigned_block_number { Err(IntakeCosignError::FutureGlobalSession)?; } From 9fa3ce71ee8d3b93624d5a21084a1795cc036f75 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 25 Mar 2026 17:27:09 -0300 Subject: [PATCH 43/71] refactor(coordinator/cosign): self-reviewing & cleaning up tests --- coordinator/cosign/Cargo.toml | 2 +- coordinator/cosign/src/evaluator.rs | 2 + coordinator/cosign/src/tests/cosigning.rs | 168 ++-- coordinator/cosign/src/tests/delay.rs | 212 ++--- coordinator/cosign/src/tests/evaluator.rs | 860 ++++++++------------- coordinator/cosign/src/tests/full_stack.rs | 23 +- coordinator/cosign/src/tests/intend.rs | 418 ++-------- coordinator/cosign/src/tests/mod.rs | 62 +- coordinator/cosign/types/src/tests/mod.rs | 45 +- coordinator/src/main.rs | 4 +- substrate/primitives/src/test_helpers.rs | 45 +- tests/shim-rpc/Cargo.toml | 11 +- tests/shim-rpc/src/builder.rs | 9 +- tests/shim-rpc/src/event_fuzzer.rs | 248 ++++++ tests/shim-rpc/src/lib.rs | 28 +- tests/shim-rpc/src/rpc.rs | 35 +- tests/shim-rpc/src/state.rs | 30 +- tests/shim-rpc/src/test_helpers.rs | 39 + tests/shim-rpc/tests/integration.rs | 295 ++++--- 19 files changed, 1139 insertions(+), 1397 deletions(-) create mode 100644 tests/shim-rpc/src/event_fuzzer.rs create mode 100644 tests/shim-rpc/src/test_helpers.rs diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml index 62aa4d91c..dde3ffb74 100644 --- a/coordinator/cosign/Cargo.toml +++ b/coordinator/cosign/Cargo.toml @@ -33,7 +33,7 @@ serai-cosign-types = { path = "./types" } [dev-dependencies] serai-cosign-types = { path = "./types", features = ["test-helpers"] } -serai-shim-rpc = { path = "../../tests/shim-rpc" } +serai-shim-rpc = { path = "../../tests/shim-rpc", features = ["test-helpers"] } serai-task = { path = "../../common/task", features = ["test-helpers"] } serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] } diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs index ecd999df4..6a947842b 100644 --- a/coordinator/cosign/src/evaluator.rs +++ b/coordinator/cosign/src/evaluator.rs @@ -56,6 +56,8 @@ fn currently_evaluated_global_session_strict( Some(existing) => existing, None => { let first = GlobalSessionsChannel::try_recv(txn) + // Panic: invariant, this function should only be called if + // the global sessions channel is populated .expect("fetching latest global session yet none declared"); CurrentlyEvaluatedGlobalSession::set(txn, &first); first diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index 715ca1199..d355e5fa8 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -1,41 +1,4 @@ -use std::{ - collections::HashMap, - time::Duration, - sync::{ - Arc, - atomic::{AtomicBool, Ordering}, - }, -}; - -use borsh::{BorshDeserialize, BorshSerialize}; - -use rand_core::OsRng; -use rand::{Rng, RngCore}; - -use serai_db::{Db as _, DbTxn, MemDb}; - -use serai_primitives::test_helpers::{random_block_hash, random_keypair}; -use serai_cosign_types::tests::sign_cosign; -use serai_task::{Task, ContinuallyRan}; - -use serai_client_serai::abi::primitives::{ - crypto::Public, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, -}; - -use crate::{ - Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, Faults, GlobalSession, GlobalSessions, - GlobalSessionsLastBlock, IntakeCosignError, NetworksLatestCosignedBlock, SignedCosign, - SubstrateBlockHash, - delay::LatestCosignedBlockNumber, - evaluator::CurrentlyEvaluatedGlobalSession, - intend::IntendedCosigns, - tests::{ - TestRequest, default_test_validator_set, random_global_session, random_validator_set, - setup_shim_serai, - }, -}; +use crate::{delay::*, evaluator::*, intend::*, tests::*, *}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] struct TestGlobalSession { @@ -45,12 +8,13 @@ struct TestGlobalSession { stakes: HashMap, total_stake: u64, } + impl TestGlobalSession { fn id(&self) -> [u8; 32] { GlobalSession::id(self.sets.clone()) } - fn to_global(&self) -> GlobalSession { + fn to_global_session(&self) -> GlobalSession { GlobalSession { start_block_number: self.start_block_number, sets: self.sets.clone(), @@ -63,23 +27,16 @@ impl TestGlobalSession { fn random_test_session() -> (TestGlobalSession, schnorrkel::Keypair) { let set = default_test_validator_set(); - let network = set.network; - let (keypair, public) = random_keypair(&mut OsRng); - - let mut keys = HashMap::new(); - let mut stakes = HashMap::new(); - - let total_stake = OsRng.gen_range(1u64 .. u64::MAX / 17); - keys.insert(network, public); - stakes.insert(network, total_stake); + let stake = OsRng.gen_range(1u64 .. u64::MAX / 17); + let gs = build_global_session(set, public, stake, u64::from(set.session.0) + 1); let session = TestGlobalSession { - start_block_number: u64::from(set.session.0) + 1, - sets: vec![set], - keys, - stakes, - total_stake, + start_block_number: gs.start_block_number, + sets: gs.sets, + keys: gs.keys, + stakes: gs.stakes, + total_stake: gs.total_stake, }; (session, keypair) } @@ -89,10 +46,10 @@ fn seed_minimal_state(db: &mut MemDb, random_test_session: &TestGlobalSession) { let id = random_test_session.id(); // Required by `Cosigning::intake_cosign`. - GlobalSessions::set(&mut txn, id, &random_test_session.to_global()); + GlobalSessions::set(&mut txn, id, &random_test_session.to_global_session()); // Required by `Cosigning::cosigns_to_rebroadcast` in the non-faulted case. - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, random_test_session.to_global())); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, random_test_session.to_global_session())); // Required for `intake_cosign` to not classify a session as "future". LatestCosignedBlockNumber::set(&mut txn, &0u64); @@ -102,7 +59,6 @@ fn seed_minimal_state(db: &mut MemDb, random_test_session: &TestGlobalSession) { #[test] fn fuzz_global_session_id() { - serai_env::init_logger(); for _ in 0 .. 100 { let num_sets = OsRng.gen_range(1u8 ..= 3); let sets: Vec<_> = (0 .. num_sets).map(|_| random_validator_set(&mut OsRng)).collect(); @@ -150,7 +106,6 @@ mod intake_cosign_error { // More cases are tested in ./full_stack.rs with fuzzing for different event type blocks #[tokio::test] async fn spawn_end_to_end() { - serai_env::init_logger(); let db = MemDb::new(); let (shim_serai, serai) = setup_shim_serai().await; let (request, _calls) = TestRequest::new(false); @@ -187,7 +142,7 @@ async fn spawn_end_to_end() { // Run block production and pipeline polling concurrently let total_blocks = 10; tokio::join!( - // Produce blocks + // Produce blocks with no events (passes all tasks and is marked as cosigned at the end) async { for _ in 0 ..= total_blocks { shim_serai.add_block_with_events(vec![]).await; @@ -209,14 +164,12 @@ async fn spawn_end_to_end() { let latest = Cosigning::::latest_cosigned_block_number(&db).unwrap(); assert_eq!(latest, Some(total_blocks)); - // Verify the dependent task was triggered by the pipeline + // Verify the dependent task was triggered by the cosign pipeline assert!(triggered.load(Ordering::SeqCst)); } #[test] fn latest_finalized_block() { - serai_env::init_logger(); - // Defaults to zero { let db = MemDb::new(); @@ -226,19 +179,23 @@ fn latest_finalized_block() { // Errors when faulted session exists { let mut db = MemDb::new(); - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); - txn.commit(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); + txn.commit(); + } assert!(matches!(Cosigning::::latest_cosigned_block_number(&db), Err(Faulted))); } // Returns stored value { let mut db = MemDb::new(); - let mut txn = db.txn(); let latest_finalized_block = OsRng.next_u64(); - LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); - txn.commit(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + txn.commit(); + } assert_eq!( Cosigning::::latest_cosigned_block_number(&db).unwrap(), Some(latest_finalized_block) @@ -248,28 +205,31 @@ fn latest_finalized_block() { #[test] fn cosigned_block() { - serai_env::init_logger(); - // Returns None beyond latest finalized block { let mut db = MemDb::new(); assert_eq!(Cosigning::::cosigned_block(&db, 0).unwrap(), None); - let mut txn = db.txn(); + let latest_finalized_block = OsRng.next_u64(); - LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); - txn.commit(); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + txn.commit(); + } assert_eq!(Cosigning::::cosigned_block(&db, latest_finalized_block + 1).unwrap(), None); } // Returns hash when block is in range { let mut db = MemDb::new(); - let block_hash = random_block_hash(&mut OsRng); let latest_finalized_block = OsRng.next_u64(); - let mut txn = db.txn(); - LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); - SubstrateBlockHash::set(&mut txn, latest_finalized_block - 1, &block_hash); - txn.commit(); + let block_hash = random_block_hash(&mut OsRng); + { + let mut txn = db.txn(); + LatestCosignedBlockNumber::set(&mut txn, &latest_finalized_block); + SubstrateBlockHash::set(&mut txn, latest_finalized_block - 1, &block_hash); + txn.commit(); + } assert_eq!( Cosigning::::cosigned_block(&db, latest_finalized_block - 1).unwrap(), Some(block_hash) @@ -279,17 +239,17 @@ fn cosigned_block() { // Errors when faulted session exists { let mut db = MemDb::new(); - let mut txn = db.txn(); - FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); - txn.commit(); + { + let mut txn = db.txn(); + FaultedSession::set(&mut txn, &random_global_session(&mut OsRng)); + txn.commit(); + } assert!(matches!(Cosigning::::cosigned_block(&db, OsRng.next_u64()), Err(Faulted))); } } #[test] fn notable_cosigns() { - serai_env::init_logger(); - // Empty without cosigns { let db = MemDb::new(); @@ -339,8 +299,6 @@ fn notable_cosigns() { #[test] fn cosigns_to_rebroadcast() { - serai_env::init_logger(); - // Excludes cosigns from different global session { let (session, keypair) = random_test_session(); @@ -467,17 +425,10 @@ mod intake_cosign { #[test] fn rejects_not_yet_indexed_block() { - serai_env::init_logger(); let db = MemDb::new(); let (keypair, _) = random_keypair(&mut OsRng); - let cosign = Cosign { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - cosigner: random_validator_set(&mut OsRng).network, - }; - let signed = sign_cosign(cosign, &keypair); + let signed = sign_cosign(random_cosign(&mut OsRng), &keypair); let mut cosigning = Cosigning::new(db); assert!(matches!( @@ -488,7 +439,6 @@ mod intake_cosign { #[test] fn rejects_stale_cosign() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -533,7 +483,6 @@ mod intake_cosign { #[test] fn rejects_unrecognized_global_session() { - serai_env::init_logger(); let (keypair, _) = random_keypair(&mut OsRng); let mut db = MemDb::new(); @@ -562,7 +511,6 @@ mod intake_cosign { #[test] fn rejects_before_global_session_start() { - serai_env::init_logger(); let (mut session, keypair) = random_test_session(); let network = session.sets[0].network; session.start_block_number = OsRng.next_u64(); @@ -572,8 +520,8 @@ mod intake_cosign { let mut db = MemDb::new(); { let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + GlobalSessions::set(&mut txn, id, &session.to_global_session()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global_session())); LatestCosignedBlockNumber::set(&mut txn, &session.start_block_number); SubstrateBlockHash::set(&mut txn, session.start_block_number - 1, &block_hash); txn.commit(); @@ -596,7 +544,6 @@ mod intake_cosign { #[test] fn rejects_after_global_session_end() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -625,7 +572,6 @@ mod intake_cosign { #[test] fn rejects_invalid_signature() { - serai_env::init_logger(); let (session, _keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -651,7 +597,6 @@ mod intake_cosign { #[test] fn rejects_future_global_session() { - serai_env::init_logger(); let (mut session, keypair) = random_test_session(); let network = session.sets[0].network; session.start_block_number = OsRng.next_u64(); @@ -661,8 +606,8 @@ mod intake_cosign { let mut db = MemDb::new(); { let mut txn = db.txn(); - GlobalSessions::set(&mut txn, id, &session.to_global()); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global())); + GlobalSessions::set(&mut txn, id, &session.to_global_session()); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, session.to_global_session())); LatestCosignedBlockNumber::set(&mut txn, &(session.start_block_number - 2)); SubstrateBlockHash::set(&mut txn, session.start_block_number, &block_hash); txn.commit(); @@ -685,7 +630,6 @@ mod intake_cosign { #[test] fn rejects_non_participating_network() { - serai_env::init_logger(); let (session, _keypair) = random_test_session(); let id = session.id(); let session_network = session.sets[0].network; @@ -718,7 +662,6 @@ mod intake_cosign { #[test] fn accepts_valid_cosign() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -743,7 +686,6 @@ mod intake_cosign { #[test] fn handles_faulty_cosign() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -778,7 +720,6 @@ mod intake_cosign { #[test] fn accepts_newer_cosign_when_existing_is_older() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -824,7 +765,6 @@ mod intake_cosign { #[test] fn accepts_cosign_at_global_session_last_block() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -863,7 +803,6 @@ mod intake_cosign { #[test] fn ignores_duplicate_fault_from_same_network() { - serai_env::init_logger(); let (session, keypair) = random_test_session(); let id = session.id(); let network = session.sets[0].network; @@ -909,7 +848,6 @@ mod intake_cosign { #[test] fn records_fault_below_threshold() { - serai_env::init_logger(); let set1 = random_validator_set(&mut OsRng); let network1 = set1.network; // Ensure we pick a distinct second network @@ -973,8 +911,6 @@ mod intake_cosign { #[test] fn intended_cosigns() { - serai_env::init_logger(); - // Empty returns empty { let mut db = MemDb::new(); @@ -988,13 +924,7 @@ fn intended_cosigns() { { let mut db = MemDb::new(); let set = random_validator_set(&mut OsRng); - - let intent = CosignIntent { - global_session: random_global_session(&mut OsRng), - block_number: OsRng.next_u64(), - block_hash: random_block_hash(&mut OsRng), - notable: true, - }; + let intent = random_cosign_intent(&mut OsRng); { let mut txn = db.txn(); @@ -1010,7 +940,7 @@ fn intended_cosigns() { assert_eq!(got[0].global_session, intent.global_session); assert_eq!(got[0].block_number, intent.block_number); assert_eq!(got[0].block_hash, intent.block_hash); - assert!(got[0].notable); + assert_eq!(got[0].notable, intent.notable); } } } diff --git a/coordinator/cosign/src/tests/delay.rs b/coordinator/cosign/src/tests/delay.rs index 11e911686..3dd7059ca 100644 --- a/coordinator/cosign/src/tests/delay.rs +++ b/coordinator/cosign/src/tests/delay.rs @@ -1,17 +1,4 @@ -use std::time::{Duration, Instant}; - -use rand::RngCore; -use rand_core::OsRng; -use serai_task::ContinuallyRan; - -use crate::{ - LatestCosignedBlockNumber, - delay::{ACKNOWLEDGEMENT_DELAY, CosignDelayTask, now_timestamp}, - evaluator::CosignedBlocks, - tests::{IntoTask, TaskTest}, -}; - -use serai_db::{Db as _, DbTxn as _, MemDb}; +use crate::{delay::*, evaluator::*, tests::*}; fn now_secs() -> u64 { now_timestamp().as_secs() @@ -40,129 +27,158 @@ impl DelayTest { let start = std::time::Instant::now(); (Self::default(), start) } - - async fn assert_task_iteration_completes_with(&self, latest_finalized_block: u64) { - use serai_env::log::debug; - let actual = LatestCosignedBlockNumber::get(&self.db); - let cosigned_pending = CosignedBlocks::peek(&self.db).is_some(); - debug!("LatestFinalizedBlock: {actual:?} (expected: Some({latest_finalized_block}))"); - debug!("CosignedBlocks pending: {cosigned_pending}"); - assert_eq!(actual, Some(latest_finalized_block)); - assert!(!cosigned_pending, "CosignedBlocks queue items should have been consumed"); - } } -#[tokio::test] -async fn returns_false_with_no_messages() { - serai_env::init_logger(); - let test = DelayTest::default(); - let mut task = test.into_task(); - - TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; +/// Verify delay's post-run DB invariants. +/// +/// After a successful task run, `CosignedBlocks` should be consumed and +/// `LatestCosignedBlockNumber` point to the expected last block number. +fn verify_db_invariants(db: &MemDb, expected_latest_block: Option) { + let actual = LatestCosignedBlockNumber::get(db); + let cosigned_pending = CosignedBlocks::peek(db).is_some(); - assert_eq!(LatestCosignedBlockNumber::get(&test.db), None); - assert_eq!(CosignedBlocks::peek(&test.db), None); + assert_eq!(actual, expected_latest_block, "LatestCosignedBlockNumber mismatch"); + assert!(!cosigned_pending, "CosignedBlocks should be fully consumed"); } #[tokio::test] async fn updates_latest_finalized_block_after_ack_delay() { + serai_env::init_logger(); let (mut test, start) = DelayTest::new(); + // Returns false (made no progress) on no CosignedBlocks { - let mut txn = test.db.txn(); - // blocks with the same timestamps - // nothing unusual happens, the task follow block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(0, now, true)); - CosignedBlocks::send(&mut txn, &(1, now, true)); - CosignedBlocks::send(&mut txn, &(2, now, true)); - txn.commit(); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; + verify_db_invariants(&test.db, None); } - let mut task = test.into_task(); + // Multiple blocks with the same evaluated_time sleep only for ACKNOWLEDGEMENT_DELAY + { + let mut txn = test.db.txn(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(2).await; + { + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(0, now, true)); + CosignedBlocks::send(&mut txn, &(1, now, true)); + CosignedBlocks::send(&mut txn, &(2, now, true)); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&test.db, Some(2)); + } - serai_env::log::info!("Blocks 0-2 processed after {:?}", start.elapsed()); + serai_env::log::info!("Blocks 0-2 processed in {:?}", start.elapsed()); + let start = Instant::now(); + // Timestamps decreasing, given their time_valid already passed during + // the 1st sleep, none need to sleep after the 1st block { let mut txn = test.db.txn(); - // timestamps out of order - // nothing unusual happens, the task stil follows block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(3, now, true)); - CosignedBlocks::send(&mut txn, &(4, now - 1, true)); - CosignedBlocks::send(&mut txn, &(5, now - 2, true)); - txn.commit(); - } - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(5).await; + { + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(3, now, true)); + CosignedBlocks::send(&mut txn, &(4, now - 5, true)); + CosignedBlocks::send(&mut txn, &(5, now - 10, true)); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&test.db, Some(5)); + } - serai_env::log::info!("Blocks 3-5 processed after {:?}", start.elapsed()); + serai_env::log::info!("Blocks 3-5 processed in {:?}", start.elapsed()); + let start = Instant::now(); + // Timestamps increasing in order + // each block sleeps for a slight amount { let mut txn = test.db.txn(); - // timestamps increasing in order - // nothing unusual happens, the task stil follows block numbers - let now = now_secs(); - CosignedBlocks::send(&mut txn, &(6, now, true)); - CosignedBlocks::send(&mut txn, &(7, now + 1, true)); - CosignedBlocks::send(&mut txn, &(8, now + 2, true)); - txn.commit(); + + { + let now = now_secs(); + CosignedBlocks::send(&mut txn, &(6, now, true)); + CosignedBlocks::send(&mut txn, &(7, now + 5, true)); + CosignedBlocks::send(&mut txn, &(8, now + 10, true)); + txn.commit(); + } + + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&test.db, Some(8)); } - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - test.assert_task_iteration_completes_with(8).await; + serai_env::log::info!("Blocks 6-8 processed in {:?}", start.elapsed()); + + // has_events=false blocks are marked as cosigned immediately, no sleep + { + let mut txn = test.db.txn(); + // the time_evaluated timestamp doesn't matter here since it will be skipped + CosignedBlocks::send(&mut txn, &(9, OsRng.next_u64(), false)); + CosignedBlocks::send(&mut txn, &(10, OsRng.next_u64(), false)); + CosignedBlocks::send(&mut txn, &(11, OsRng.next_u64(), false)); + txn.commit(); - serai_env::log::info!("Blocks 6-8 processed after {:?}", start.elapsed()); + let start = Instant::now(); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&test.db, Some(11)); + assert!( + start.elapsed() < Duration::from_secs(1), + "no-events blocks should not sleep, took {:?}", + start.elapsed() + ); + } } #[tokio::test] async fn does_not_regress_and_skips_if_not_a_later_block() { - serai_env::init_logger(); let mut test = DelayTest::default(); + // Does not regress { - let mut txn = test.db.txn(); - CosignedBlocks::send(&mut txn, &(1, now_secs(), true)); - CosignedBlocks::send(&mut txn, &(2, now_secs(), true)); - - // Sent out of order below - CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); - // 3 will be skipped after 4 was processed - CosignedBlocks::send(&mut txn, &(3, now_secs(), true)); - - txn.commit(); + { + let mut txn = test.db.txn(); + CosignedBlocks::send(&mut txn, &(1, now_secs(), true)); + CosignedBlocks::send(&mut txn, &(2, now_secs(), true)); + + // Sent out of order below + CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); + // 3 will be skipped after 4 was processed + CosignedBlocks::send(&mut txn, &(3, now_secs(), true)); + + txn.commit(); + } + + let mut task = test.into_task(); + // returns made_progress as true + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&test.db, Some(4)); } - let mut task = test.into_task(); - // returns made_progress as true - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - - // This is unlikely to actually happen in practice but it needs to be tested that it does what it is - // meant to do, which is that if we've already acknowledged a later block, consume and skip - test.assert_task_iteration_completes_with(4).await; - + // Skip if not a later block { - let mut txn = test.db.txn(); - // Sends the same previous block number - CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); - txn.commit(); + { + let mut txn = test.db.txn(); + // Sends the same previous block number + CosignedBlocks::send(&mut txn, &(4, now_secs(), true)); + txn.commit(); + } + + let mut task = test.into_task(); + // No progress was made since the same block number was also skipped, + // made_progress returns false + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; + verify_db_invariants(&test.db, Some(4)); } - - let mut task = test.into_task(); - - // No progress was made since the same block number was skipped, - // made_progress returns false - TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - test.assert_task_iteration_completes_with(4).await; } #[tokio::test] async fn respects_acknowledgement_delay() { - serai_env::init_logger(); let mut test = DelayTest::default(); let block_number = OsRng.next_u64(); @@ -188,7 +204,7 @@ async fn respects_acknowledgement_delay() { assert!(made_progress); // Block is now finalized - assert_eq!(LatestCosignedBlockNumber::get(&test.db), Some(block_number)); + verify_db_invariants(&test.db, Some(block_number)); // The elapsed time must be at least ACKNOWLEDGEMENT_DELAY let elapsed = start.elapsed(); diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index df9554db3..513f8494e 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -1,29 +1,4 @@ -use std::{ - collections::HashMap, - sync::atomic::Ordering, - time::{Duration, Instant}, -}; - -use rand_core::OsRng; -use serai_cosign_types::SignedCosign; -use serai_db::{Db as _, DbTxn, MemDb}; -use serai_client_serai::abi::primitives::{ - crypto::Public, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, -}; - -use serai_primitives::test_helpers::random_block_hash; -use serai_task::ContinuallyRan; - -use crate::{ - Cosign, GlobalSession, HasEvents, NetworksLatestCosignedBlock, - evaluator::{ - CosignEvaluatorTask, CosignedBlocks, CurrentlyEvaluatedGlobalSession, REQUEST_COSIGNS_SPACING, - }, - intend::{BlockEventData, BlockEvents, GlobalSessionsChannel}, - tests::{IntoTask, TaskTest, TestRequest, random_global_session}, -}; +use crate::{evaluator::*, intend::*, tests::*, *}; pub(crate) struct EvaluatorTest { pub(crate) db: MemDb, @@ -45,28 +20,54 @@ impl IntoTask for EvaluatorTest { } impl EvaluatorTest { - fn init_global_session(&mut self, start_block_number: u64) -> [u8; 32] { + fn init_global_session(&mut self, start_block_number: u64) -> ([u8; 32], ExternalNetworkId) { let global_session = random_global_session(&mut OsRng); - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; + let set = random_validator_set(&mut OsRng); + let info = build_global_session( + set, + random_public(&mut OsRng), + OsRng.gen_range(1 ..= u64::MAX), + start_block_number, + ); - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); + let mut txn = self.db.txn(); + GlobalSessionsChannel::send(&mut txn, &(global_session, info)); + txn.commit(); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); + (global_session, set.network) + } - let info = - GlobalSession { start_block_number, sets: vec![set], keys, stakes, total_stake: 1u64 }; + /// Like `init_global_session` but with empty stakes, for testing the "didn't have its stake" error. + fn init_stakeless_global_session( + &mut self, + start_block_number: u64, + ) -> ([u8; 32], ExternalNetworkId) { + let global_session = random_global_session(&mut OsRng); + let network = random_external_network_id(&mut OsRng); + let set = ExternalValidatorSet { network, session: Session(OsRng.gen()) }; + + let mut keys = HashMap::new(); + keys.insert(network, random_public(&mut OsRng)); + + let info = GlobalSession { + start_block_number, + sets: vec![set], + keys, + stakes: HashMap::new(), + // total_stake is not important, + // the 0 stake test fails before it is used + total_stake: OsRng.next_u64(), + }; let mut txn = self.db.txn(); GlobalSessionsChannel::send(&mut txn, &(global_session, info)); txn.commit(); - global_session + (global_session, network) } } -/// Verify evaluator post-run DB invariants. +/// Verify evaluator's post-run DB invariants. /// /// After a successful task run, all input channels should be consumed and the /// `CosignedBlocks` output channel should contain exactly the expected block range. @@ -127,474 +128,352 @@ fn signed_cosign( block_hash: random_block_hash(&mut OsRng), cosigner, }, - signature: [0u8; 64], + signature: random_bytes_64(&mut OsRng), } } -#[tokio::test] -async fn returns_false_with_no_block_events() { - let mut test = EvaluatorTest::default(); - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; - verify_db_invariants(&mut test.db, None); -} - #[tokio::test] async fn processes_blocks_with_no_events() { - serai_env::init_logger(); let mut test = EvaluatorTest::default(); + + // Returns false (made no progress) on no blocks to evaluate + { + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; + verify_db_invariants(&mut test.db, None); + } + test.init_global_session(0); + // Sent BlockEvents progress and with no events are sent to CosignedBlocks { let mut txn = test.db.txn(); BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((0, 2))); + } - // HasEvents::No blocks are sent through CosignedBlocks with has_events=false + // Advances to the next global session when blocks reach its start_block_number { - let mut txn = test.db.txn(); - for expected in 0 ..= 2 { - let (block_number, _time, has_events) = CosignedBlocks::try_recv(&mut txn).unwrap(); - assert_eq!(block_number, expected); - assert_eq!(has_events, false); + let (session2, _) = test.init_global_session(6); + + { + let mut txn = test.db.txn(); + for block_number in 3 ..= 6 { + BlockEvents::send(&mut txn, &BlockEventData { block_number, has_events: HasEvents::No }); + } + txn.commit(); } - assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "no more blocks expected"); - txn.commit(); + + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((3, 6))); + + let current = + CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); + assert_eq!(current.0, session2, "should have transitioned to session 2"); + assert_eq!(current.1.start_block_number, 6, "session 2 should start at block 6"); } } #[tokio::test] async fn processes_notable_events_when_cosigned() { - serai_env::init_logger(); let mut test = EvaluatorTest::default(); - let global_session = test.init_global_session(0); + let (global_session, network) = test.init_global_session(0); + // Notable block with no NetworksLatestCosignedBlock set fails { let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); BlockEvents::send( &mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, ); txn.commit(); - } - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((1, 1))); -} + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(GlobalSessionsChannel::peek(&test.db).is_none(), "global session should be consumed"); + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); -#[tokio::test] -async fn non_notable_uses_cached_known_cosign() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - let global_session = test.init_global_session(0); + // Still fails on retry even with enough time elapsed to re-request cosigns + let mut task: CosignEvaluatorTask = test.into_task().into(); + task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); + } + // Same block succeeds once cosign is intake { let mut txn = test.db.txn(); NetworksLatestCosignedBlock::set( &mut txn, global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 10), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 3, has_events: HasEvents::NonNotable }, + network, + &signed_cosign(global_session, network, 1), ); txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((1, 3))); -} -#[tokio::test] -async fn non_notable_with_cosign_returns_some() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - let global_session = test.init_global_session(0); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((0, 1))); + } + // Cosign for a later block doesn't satisfy Notable (requires exact block_number match) { let mut txn = test.db.txn(); NetworksLatestCosignedBlock::set( &mut txn, global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), + network, + &signed_cosign(global_session, network, 5), ); BlockEvents::send( &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, ); txn.commit(); - } - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((1, 1))); -} - -#[tokio::test] -async fn non_notable_computes_lowest_common_block() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - - let global_session = { - let sets = vec![ - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, - ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, - ]; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); - - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 50u64); - stakes.insert(ExternalNetworkId::Ethereum, 50u64); - - let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: 100u64 }; - - let mut txn = test.db.txn(); - let id = random_global_session(&mut OsRng); - GlobalSessionsChannel::send(&mut txn, &(id, info)); - txn.commit(); - - id - }; + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); + } + // Cosign for an earlier block doesn't satisfy Notable either + // (BlockEvents already had sent block 2 from the branch above + // but the cosign is for block 1) { let mut txn = test.db.txn(); NetworksLatestCosignedBlock::set( &mut txn, global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 10), - ); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Ethereum, - &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 3, has_events: HasEvents::NonNotable }, + network, + &signed_cosign(global_session, network, 1), ); txn.commit(); - } - - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - verify_db_invariants(&mut test.db, Some((1, 3))); -} -#[tokio::test] -async fn advances_global_session_at_start_block() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); + } - let session1 = [1u8; 32]; + // Each Notable block succeeds when cosigned with its exact block number { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; + for block_number in 2 .. 4 { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, block_number), + ); + // (block 2 is already in BlockEvents) + if block_number > 2 { + BlockEvents::send( + &mut txn, + &BlockEventData { block_number, has_events: HasEvents::Notable }, + ); + } + txn.commit(); - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(session1, info)); - txn.commit(); + let mut task = test.into_task(); + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((block_number, block_number))); + } } - let session2 = [2u8; 32]; + // Cosigned Notable block without stakes fails { - let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); - let info = - GlobalSession { start_block_number: 3, sets: vec![set], keys, stakes, total_stake: 1u64 }; + let mut test = EvaluatorTest::default(); + let (global_session, network) = test.init_stakeless_global_session(0); let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &(session2, info)); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); txn.commit(); + + let mut task = test.into_task(); + TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; } + // request_notable_cosigns failure propagates { + let mut test = EvaluatorTest::default(); + test.init_global_session(0); + let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 2, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, + ); txn.commit(); - } - let mut task = test.into_task(); - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + let (request, calls) = TestRequest::new(true); + let mut task = CosignEvaluatorTask { + db: test.db.clone(), + request, + last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), + }; - assert!(BlockEvents::peek(&test.db).is_none(), "BlockEvents should be fully consumed"); - // HasEvents::No blocks are sent through CosignedBlocks with has_events=false - { - let mut txn = test.db.txn(); - for expected in 1 ..= 3 { - let (block_number, _time, has_events) = CosignedBlocks::try_recv(&mut txn) - .unwrap_or_else(|| panic!("expected cosigned block {expected}")); - assert_eq!(block_number, expected); - assert!(!has_events, "HasEvents::No blocks should have has_events=false"); - } - assert!(CosignedBlocks::try_recv(&mut txn).is_none(), "no more blocks expected"); - txn.commit(); + TaskTest::task_runs_and_fails_with(&mut task, "RequestError").await; + assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); } - - let current = - CurrentlyEvaluatedGlobalSession::get(&test.db).expect("should have current session"); - assert_eq!(current.0, session2, "should have transitioned to session 2"); - assert_eq!(current.1.start_block_number, 3, "session 2 should start at block 3"); } -mod errors { - use super::*; - - #[tokio::test] - async fn notable_events_without_cosign() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - test.init_global_session(0); +#[tokio::test] +async fn processes_non_notable_events_when_cosigned() { + let mut test = EvaluatorTest::default(); + let (global_session, network) = test.init_global_session(0); - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, - ); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); - txn.commit(); - } + // NonNotable block with no NetworksLatestCosignedBlock set fails + { + let mut txn = test.db.txn(); + BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); let mut task = test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; - // On failure, global session was consumed but block events remain - assert!(GlobalSessionsChannel::peek(&test.db).is_none()); - assert!(BlockEvents::peek(&test.db).is_some()); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::Notable }, - ); - txn.commit(); - } + assert!(GlobalSessionsChannel::peek(&test.db).is_none(), "global session should be consumed"); + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); + // Still fails on retry even with enough time elapsed to re-request cosigns let mut task: CosignEvaluatorTask = test.into_task().into(); task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); - TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; - assert!(GlobalSessionsChannel::peek(&test.db).is_none()); - assert!(BlockEvents::peek(&test.db).is_some()); + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); } - #[tokio::test] - async fn notable_events_without_stakes() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - - let global_session = { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let stakes = HashMap::new(); - - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - let id = random_global_session(&mut OsRng); - GlobalSessionsChannel::send(&mut txn, &(id, info)); - txn.commit(); - - id - }; - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); - } + // Same block succeeds once cosign is present + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 1), + ); + txn.commit(); let mut task = test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((0, 1))); } - #[tokio::test] - async fn non_notable_events_without_cosign() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 0, has_events: HasEvents::No }); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 3, has_events: HasEvents::No }); - txn.commit(); - } + // Unlike Notable, a cosign for a later block satisfies NonNotable (uses >=) + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, + ); + txn.commit(); let mut task = test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; - assert!(GlobalSessionsChannel::peek(&test.db).is_none()); - assert!(BlockEvents::peek(&test.db).is_some()); - - { - let mut txn = test.db.txn(); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events: HasEvents::No }); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 2, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((2, 2))); + } - let mut task: CosignEvaluatorTask = test.into_task().into(); - task.last_request_for_cosigns = Instant::now() - Duration::from_secs(5); + // Cosign for an earlier block doesn't satisfy NonNotable (uses >=) + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 1), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 3, has_events: HasEvents::NonNotable }, + ); + txn.commit(); + let mut task = test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + assert!(BlockEvents::peek(&test.db).is_some(), "block events should remain for retry"); } - #[tokio::test] - async fn non_notable_events_without_stakes() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - - let global_session = { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let stakes = HashMap::new(); - - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - let id = random_global_session(&mut OsRng); - GlobalSessionsChannel::send(&mut txn, &(id, info)); - txn.commit(); - - id - }; - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } + // Multiple NonNotable blocks in one run via cached known_cosign + // (block 3 is already in BlockEvents from the failed branch above) + { + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 10), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 4, has_events: HasEvents::NonNotable }, + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 5, has_events: HasEvents::NonNotable }, + ); + txn.commit(); let mut task = test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + verify_db_invariants(&mut test.db, Some((3, 5))); } - #[tokio::test] - async fn non_notable_cosign_too_low_does_not_add_weight() { - serai_env::init_logger(); + // Cosigned NonNotable block without stakes fails + { let mut test = EvaluatorTest::default(); - let global_session = test.init_global_session(0); + let (global_session, network) = test.init_stakeless_global_session(0); - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 5, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } + let mut txn = test.db.txn(); + NetworksLatestCosignedBlock::set( + &mut txn, + global_session, + network, + &signed_cosign(global_session, network, 5), + ); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); let mut task = test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "wasn't yet cosigned").await; + TaskTest::task_runs_and_fails_with(&mut task, "didn't have its stake").await; } - #[tokio::test] - async fn request_notable_cosigns_failure() { - serai_env::init_logger(); + // request_notable_cosigns failure propagates + { let mut test = EvaluatorTest::default(); test.init_global_session(0); - { - let mut txn = test.db.txn(); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); - } + let mut txn = test.db.txn(); + BlockEvents::send( + &mut txn, + &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, + ); + txn.commit(); let (request, calls) = TestRequest::new(true); let mut task = CosignEvaluatorTask { @@ -606,59 +485,33 @@ mod errors { TaskTest::task_runs_and_fails_with(&mut task, "RequestError").await; assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); } +} - #[tokio::test] - async fn request_non_notable_cosigns_failure() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - test.init_global_session(0); - - { - let mut txn = test.db.txn(); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } - - let (request, calls) = TestRequest::new(true); - let mut task = CosignEvaluatorTask { - db: test.db.clone(), - request, - last_request_for_cosigns: Instant::now() - REQUEST_COSIGNS_SPACING - Duration::from_secs(5), - }; - - TaskTest::task_runs_and_fails_with(&mut task, "RequestError").await; - assert_eq!(calls.load(Ordering::SeqCst), 1, "request_notable_cosigns should have been called"); - } +mod errors { + use super::*; #[tokio::test] #[should_panic(expected = "candidate's start block number ")] async fn panics_when_session_starts_after_block() { - serai_env::init_logger(); let mut test = EvaluatorTest::default(); + let start_block_number: u64 = OsRng.gen_range(2 ..= 100); + test.init_global_session(start_block_number); - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); - - let info = - GlobalSession { start_block_number: 10, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - let id = random_global_session(&mut OsRng); - CurrentlyEvaluatedGlobalSession::set(&mut txn, &(id, info)); - BlockEvents::send(&mut txn, &BlockEventData { block_number: 5, has_events: HasEvents::No }); - txn.commit(); - } + // Move the session from the channel into CurrentlyEvaluatedGlobalSession + let mut txn = test.db.txn(); + let session = GlobalSessionsChannel::try_recv(&mut txn).unwrap(); + CurrentlyEvaluatedGlobalSession::set(&mut txn, &session); + BlockEvents::send( + &mut txn, + &BlockEventData { + block_number: OsRng.gen_range(0 .. start_block_number), + has_events: HasEvents::No, + }, + ); + txn.commit(); let mut task = test.into_task(); + // will panic let _ = task.run_iteration().await; } @@ -667,36 +520,9 @@ mod errors { expected = "currently_evaluated_global_session_strict wasn't called incrementally" )] async fn panics_when_called_non_incrementally() { - serai_env::init_logger(); let mut test = EvaluatorTest::default(); - - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, 1u64); - let info = - GlobalSession { start_block_number: 0, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &([1u8; 32], info)); - txn.commit(); - } - - { - let set = ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }; - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); - let info = - GlobalSession { start_block_number: 5, sets: vec![set], keys, stakes, total_stake: 1u64 }; - - let mut txn = test.db.txn(); - GlobalSessionsChannel::send(&mut txn, &([2u8; 32], info)); - txn.commit(); - } + test.init_global_session(0); + test.init_global_session(5); { let mut txn = test.db.txn(); @@ -705,30 +531,41 @@ mod errors { } let mut task = test.into_task(); + // will panic let _ = task.run_iteration().await; } - #[tokio::test] - #[should_panic(expected = "attempt to add with overflow")] - async fn weight_overflow_notable() { - serai_env::init_logger(); + fn setup_weight_overflow(has_events: HasEvents) -> CosignEvaluatorTask { let mut test = EvaluatorTest::default(); + let cosign_block = match has_events { + HasEvents::Notable => 1u64, + HasEvents::NonNotable => 5u64, + HasEvents::No => unreachable!(), + }; - let global_session = { + let overflowing_stake_global_session = { let sets = vec![ ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, ]; let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); + keys.insert(ExternalNetworkId::Bitcoin, random_public(&mut OsRng)); + keys.insert(ExternalNetworkId::Ethereum, random_public(&mut OsRng)); let mut stakes = HashMap::new(); stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); - - let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; + stakes.insert(ExternalNetworkId::Ethereum, OsRng.next_u64()); + + let info = GlobalSession { + start_block_number: 0, + sets, + keys, + stakes, + // total_stake is not important, + // the overflow will panic before it is used + total_stake: u64::MAX, + }; let mut txn = test.db.txn(); let id = random_global_session(&mut OsRng); @@ -738,83 +575,32 @@ mod errors { id }; - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 1), - ); + let mut txn = test.db.txn(); + for network in [ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum] { NetworksLatestCosignedBlock::set( &mut txn, - global_session, - ExternalNetworkId::Ethereum, - &signed_cosign(global_session, ExternalNetworkId::Ethereum, 1), + overflowing_stake_global_session, + network, + &signed_cosign(overflowing_stake_global_session, network, cosign_block), ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::Notable }, - ); - txn.commit(); } + BlockEvents::send(&mut txn, &BlockEventData { block_number: 1, has_events }); + txn.commit(); - let mut task = test.into_task(); - TaskTest::task_runs_and_fails_with(&mut task, "weight_cosigned overflow").await; + test.into_task() } #[tokio::test] #[should_panic(expected = "attempt to add with overflow")] - async fn weight_overflow_non_notable() { - serai_env::init_logger(); - let mut test = EvaluatorTest::default(); - - let global_session = { - let sets = vec![ - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) }, - ExternalValidatorSet { network: ExternalNetworkId::Ethereum, session: Session(0) }, - ]; - - let mut keys = HashMap::new(); - keys.insert(ExternalNetworkId::Bitcoin, Public([1u8; 32])); - keys.insert(ExternalNetworkId::Ethereum, Public([2u8; 32])); - - let mut stakes = HashMap::new(); - stakes.insert(ExternalNetworkId::Bitcoin, u64::MAX); - stakes.insert(ExternalNetworkId::Ethereum, 1u64); - - let info = GlobalSession { start_block_number: 0, sets, keys, stakes, total_stake: u64::MAX }; - - let mut txn = test.db.txn(); - let id = random_global_session(&mut OsRng); - GlobalSessionsChannel::send(&mut txn, &(id, info)); - txn.commit(); - - id - }; - - { - let mut txn = test.db.txn(); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Bitcoin, - &signed_cosign(global_session, ExternalNetworkId::Bitcoin, 5), - ); - NetworksLatestCosignedBlock::set( - &mut txn, - global_session, - ExternalNetworkId::Ethereum, - &signed_cosign(global_session, ExternalNetworkId::Ethereum, 5), - ); - BlockEvents::send( - &mut txn, - &BlockEventData { block_number: 1, has_events: HasEvents::NonNotable }, - ); - txn.commit(); - } + async fn panics_on_weight_overflow_notable() { + let mut task = setup_weight_overflow(HasEvents::Notable); + TaskTest::task_runs_and_fails_with(&mut task, "weight_cosigned overflow").await; + } - let mut task = test.into_task(); + #[tokio::test] + #[should_panic(expected = "attempt to add with overflow")] + async fn panics_on_weight_overflow_non_notable() { + let mut task = setup_weight_overflow(HasEvents::NonNotable); TaskTest::task_runs_and_fails_with(&mut task, "weight_cosigned overflow").await; } } diff --git a/coordinator/cosign/src/tests/full_stack.rs b/coordinator/cosign/src/tests/full_stack.rs index d346a3be4..935ceee6b 100644 --- a/coordinator/cosign/src/tests/full_stack.rs +++ b/coordinator/cosign/src/tests/full_stack.rs @@ -5,28 +5,7 @@ //! pipeline. State is injected via the Serai node shim (`serai_shim_rpc`), exercising //! the same `pub` API surface a real coordinator would use. -use std::time::Duration; - -use rand::{Rng, seq::SliceRandom}; -use rand_core::OsRng; -use serai_db::{Db as _, DbTxn, MemDb}; - -use serai_primitives::test_helpers::random_block_hash; -use serai_client_serai::abi::primitives::{ - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, -}; -use serai_cosign_types::tests::sign_cosign; - -use crate::{ - Cosign, CosignIntent, Cosigning, Faulted, FaultedSession, GlobalSessions, IntakeCosignError, - SubstrateBlockHash, - delay::LatestCosignedBlockNumber, - evaluator::{cosign_threshold, currently_evaluated_global_session}, - tests::{TestRequest, setup_shim_serai}, -}; - -use super::intend::EventFuzzer; +use crate::{evaluator::*, tests::*, *}; /// Run the honest cosigning loop: drain intents from all keyed sessions, sign them /// with the EventFuzzer's keypairs, intake them, and repeat until `should_break` returns `true`. diff --git a/coordinator/cosign/src/tests/intend.rs b/coordinator/cosign/src/tests/intend.rs index 77ff7ab46..162dbc174 100644 --- a/coordinator/cosign/src/tests/intend.rs +++ b/coordinator/cosign/src/tests/intend.rs @@ -1,68 +1,7 @@ -use std::{collections::HashMap, sync::Arc}; - -use rand_core::{OsRng, RngCore}; - -use serai_db::MemDb; -use serai_task::ContinuallyRan; - -use serai_shim_rpc::SeraiShimRpc; - -use serai_client_serai::{ - Serai, - abi::{ - Event, coins, - primitives::{ - address::SeraiAddress, - balance::{Amount, ExternalBalance}, - coin::ExternalCoin, - crypto::KeyPair, - instructions::{OutInstruction, OutInstructionWithBalance}, - network_id::{ExternalNetworkId, NetworkId}, - validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, - }, - validator_sets, - }, -}; -use serai_primitives::test_helpers::{ - random_external_address, random_external_key, random_keypair, random_serai_address, -}; +use serai_shim_rpc::test_helpers::*; use crate::{intend::*, tests::*, *}; -pub(super) fn set_decided_event( - set: ValidatorSet, - validators: Vec<(SeraiAddress, KeyShares)>, -) -> Event { - Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) -} - -pub(super) fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { - Event::ValidatorSets(validator_sets::Event::Allocation { - validator, - network, - amount: Amount(amount), - }) -} - -fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { - Event::ValidatorSets(validator_sets::Event::Deallocation { - validator, - network, - amount: Amount(amount), - timeline: serai_abi::primitives::validator_sets::DeallocationTimeline::Immediate, - }) -} - -pub(super) fn burn_with_instruction_event(from: SeraiAddress) -> Event { - Event::Coins(coins::Event::BurnWithInstruction { - from, - instruction: OutInstructionWithBalance { - instruction: OutInstruction::Transfer(random_external_address(&mut OsRng)), - balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1) }, - }, - }) -} - pub(crate) struct IntendTestStruct { pub(crate) serai: Arc, pub(crate) db: MemDb, @@ -76,15 +15,14 @@ impl IntoTask for IntendTestStruct { } } -/// Create a [`SeraiShimRpc`] and a [`IntendTestStruct`] connected to it. +/// Create a [`SeraiShimRpc`] and a [`IntendTestStruct`] connected to its Serai RPC. async fn setup_mock_test() -> (SeraiShimRpc, IntendTestStruct) { - let (shim_serai, serai) = setup_shim_serai().await; - (shim_serai, IntendTestStruct { serai, db: MemDb::new() }) + let (shim, serai) = setup_shim_serai().await; + (shim, IntendTestStruct { serai, db: MemDb::new() }) } -/// Verify all post-run DB invariants by replaying events from the Serai node. +/// Verify all intend's post-run DB invariants by replaying events from the Serai node. async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { - use serai_client_serai::abi::validator_sets::Event as VsEvent; use serai_env::log::debug; let num_blocks_u64 = u64::try_from(num_blocks).unwrap(); @@ -95,8 +33,8 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { assert_eq!( scan_from, Some(num_blocks_u64), - "ScanCosignFrom should be {num_blocks} after processing blocks 0..={n}", - n = num_blocks - 1 + "ScanCosignFrom should be {num_blocks} after processing blocks 0..={}", + num_blocks - 1 ); // Replay events from the shim node to compute expected DB state. @@ -113,22 +51,22 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { let vset = events.validator_sets(); for event in vset.allocation_events() { - let VsEvent::Allocation { validator, network, amount } = event else { continue }; + let Event::Allocation { validator, network, amount } = event else { continue }; let Ok(net) = ExternalNetworkId::try_from(*network) else { continue }; *expected_stakes.entry((net, *validator)).or_default() += amount.0; } for event in vset.deallocation_events() { - let VsEvent::Deallocation { validator, network, amount, .. } = event else { continue }; + let Event::Deallocation { validator, network, amount, .. } = event else { continue }; let Ok(net) = ExternalNetworkId::try_from(*network) else { continue }; *expected_stakes.entry((net, *validator)).or_default() -= amount.0; } for event in vset.set_decided_events() { - let VsEvent::SetDecided { set, validators } = event else { continue }; + let Event::SetDecided { set, validators } = event else { continue }; let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; decided_validators.insert(set, validators.iter().map(|(v, _)| *v).collect()); } for event in vset.set_keys_events() { - let VsEvent::SetKeys { set, .. } = event else { continue }; + let Event::SetKeys { set, .. } = event else { continue }; let validators = decided_validators.get(set).cloned().unwrap_or_default(); let mut total_stake = 0u64; for v in &validators { @@ -220,7 +158,7 @@ async fn verify_db_invariants(db: &MemDb, serai: &Serai, num_blocks: usize) { } } - debug!( + serai_env::info!( "DB invariants verified: {} blocks, {} stake entries, {} LatestSets, {} SetKeys events", num_blocks, expected_stakes.len(), @@ -234,12 +172,11 @@ mod errors { #[tokio::test] async fn errors_if_chain_is_not_linear() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; - serai.make_block(0, vec![]).await; - serai.make_block(1, vec![]).await; - serai.make_non_linear_block(2, vec![]).await; + shim.make_block(0, vec![]).await; + shim.make_block(1, vec![]).await; + shim.make_non_linear_block(2, vec![]).await; let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "doesn't build upon").await; @@ -248,8 +185,8 @@ mod errors { assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); // Fix the chain and re-run - serai.remove_block(2).await; - serai.make_block(2, vec![]).await; + shim.remove_block(2).await; + shim.make_block(2, vec![]).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -258,13 +195,23 @@ mod errors { #[tokio::test] async fn errors_if_block_not_found() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; + + // No blocks yet, fails because serai.latest_finalized_block_number() defaults to 0 + // even without block 0 existing yet, so it fails when getting block 0 + { + let mut task = task_test.into_task(); + TaskTest::task_runs_and_fails_with( + &mut task, + "couldn't get block which should've been finalized", + ) + .await; + } - serai.make_block(0, vec![]).await; - serai.make_block(1, vec![]).await; - serai.make_block(2, vec![]).await; - serai.set_block_missing(2).await; + shim.make_block(0, vec![]).await; + shim.make_block(1, vec![]).await; + shim.make_block(2, vec![]).await; + shim.set_block_missing(2).await; let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with( @@ -275,7 +222,7 @@ mod errors { assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - serai.clear_block_missing(2).await; + shim.clear_block_missing(2).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -284,20 +231,19 @@ mod errors { #[tokio::test] async fn handles_rpc_error_on_block_fetch() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; - serai.make_block(0, vec![]).await; - serai.make_block(1, vec![]).await; - serai.make_block(2, vec![]).await; - serai.set_block_number_error("blockchain/block", 2, "connection refused").await; + shim.make_block(0, vec![]).await; + shim.make_block(1, vec![]).await; + shim.make_block(2, vec![]).await; + shim.set_block_number_error("blockchain/block", 2, "connection refused").await; let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching block").await; assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - serai.clear_block_number_error("blockchain/block", 2).await; + shim.clear_block_number_error("blockchain/block", 2).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -306,20 +252,19 @@ mod errors { #[tokio::test] async fn handles_rpc_error_on_events_fetch() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; - serai.make_block(0, vec![]).await; - serai.make_block(1, vec![]).await; - let block2_hash = serai.make_block(2, vec![]).await; - serai.set_block_hash_error("blockchain/events", block2_hash, "timeout").await; + shim.make_block(0, vec![]).await; + shim.make_block(1, vec![]).await; + let block2_hash = shim.make_block(2, vec![]).await; + shim.set_block_hash_error("blockchain/events", block2_hash, "timeout").await; let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching events").await; assert_eq!(ScanCosignFrom::get(&task_test.db), Some(2)); - serai.clear_block_hash_error("blockchain/events", block2_hash).await; + shim.clear_block_hash_error("blockchain/events", block2_hash).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -329,10 +274,8 @@ mod errors { #[tokio::test] #[should_panic(expected = "validator set from Event::SetDecided was empty")] async fn errors_if_set_decided_has_empty_validators() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; - - serai.make_block(0, vec![]).await; + let (shim, task_test) = setup_mock_test().await; + shim.make_block(0, vec![]).await; let empty_set_decided = set_decided_event( ValidatorSet { @@ -341,20 +284,19 @@ mod errors { }, vec![], ); - serai.make_block(1, vec![vec![empty_set_decided]]).await; + shim.make_block(1, vec![vec![empty_set_decided]]).await; let mut task = task_test.into_task(); - task.run_iteration().await; + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; } #[tokio::test] async fn handles_rpc_error_on_latest_finalized() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; - serai.make_block(0, vec![]).await; - serai.make_block(1, vec![]).await; - serai.set_error("blockchain/latest_finalized_block_number", "network error").await; + shim.make_block(0, vec![]).await; + shim.make_block(1, vec![]).await; + shim.set_error("blockchain/latest_finalized_block_number", "network error").await; let mut task = task_test.into_task(); TaskTest::task_runs_and_fails_with(&mut task, "RPC error fetching latest finalized").await; @@ -362,7 +304,7 @@ mod errors { // No blocks processed, error happened before scanning assert_eq!(ScanCosignFrom::get(&task_test.db), None); - serai.clear_error("blockchain/latest_finalized_block_number").await; + shim.clear_error("blockchain/latest_finalized_block_number").await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -370,252 +312,18 @@ mod errors { } } -/// Random event, state, and block generator. -pub(super) struct EventFuzzer { - /// Available validator addresses. - pub(super) validators: Vec, - /// All networks. - networks: Vec, - /// Running stake ledger: `(network, validator) -> accumulated_stake`. - stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64>, - /// Sets that have been decided but not yet keyed. - pending_keys: HashMap>, - /// Next session number per network. - pub(super) next_session: HashMap, - /// Keypairs indexed by public key bytes, for signing cosigns. - pub(super) keypairs: HashMap<[u8; 32], schnorrkel::Keypair>, -} - -impl EventFuzzer { - pub(super) fn new() -> Self { - // OsRng.next_u64() % 17 = 0..16 - // _ + 4 = 4..20 validators per test - let num_validators = usize::try_from((OsRng.next_u64() % 17) + 4).unwrap(); - - let validators: Vec = - (0 .. num_validators).map(|_| random_serai_address(&mut OsRng)).collect(); - - let networks: Vec = NetworkId::all().collect(); - - Self { - validators, - networks, - stakes: HashMap::new(), - pending_keys: HashMap::new(), - next_session: HashMap::new(), - keypairs: HashMap::new(), - } - } - - /// Pick a random element from a slice. - fn pick<'a, T>(&mut self, slice: &'a [T]) -> &'a T { - let i = OsRng.next_u64() % u64::try_from(slice.len()).unwrap(); - &slice[usize::try_from(i).unwrap()] - } - - /// Generate a random amount using a weighted distribution - fn random_amount(&mut self) -> u64 { - match OsRng.next_u64() % 20 { - 0 ..= 4 => (OsRng.next_u64() % 10) + 1, - 5 ..= 11 => (OsRng.next_u64() % 990) + 11, - 12 ..= 16 => (OsRng.next_u64() % 99_000) + 1_001, - _ => (OsRng.next_u64() % 9_900_000) + 100_001, - } - } - - /// Generate a random allocation event. - fn random_allocation(&mut self) -> Event { - let validator = *self.pick(&self.validators.clone()); - let network = *self.pick(&self.networks.clone()); - let amount = self.random_amount(); - if let Ok(ext) = ExternalNetworkId::try_from(network) { - *self.stakes.entry((ext, validator)).or_default() += amount; - } - allocation_event(validator, network, amount) - } - - /// Generate a random deallocation event. Returns `None` if no validator has stake. - fn random_deallocation(&mut self) -> Option { - // ~25% chance of generating a Serai deallocation - if OsRng.next_u64() % 4 == 0 { - let validator = *self.pick(&self.validators.clone()); - let amount = self.random_amount(); - return Some(deallocation_event(validator, NetworkId::Serai, amount)); - } - - let candidates: Vec<((ExternalNetworkId, SeraiAddress), u64)> = - self.stakes.iter().filter(|(_, &s)| s > 0).map(|(&k, &v)| (k, v)).collect(); - if candidates.is_empty() { - return None; - } - let i = OsRng.next_u64() % u64::try_from(candidates.len()).unwrap(); - let ((network, validator), current_stake) = candidates[usize::try_from(i).unwrap()]; - // Use weighted amount, clamped to current_stake so we don't underflow - let amount = self.random_amount().min(current_stake).max(1); - *self.stakes.entry((network, validator)).or_default() -= amount; - Some(deallocation_event(validator, NetworkId::External(network), amount)) - } - - /// Generate a random SetDecided event. - /// - /// SetDecided only applies to external networks (Serai sessions are managed by the runtime). - fn random_set_decided(&mut self) -> Option { - let external_networks: Vec = - self.networks.iter().copied().filter_map(|n| ExternalNetworkId::try_from(n).ok()).collect(); - let network = *self.pick(&external_networks); - let session_num = *self.next_session.entry(network).or_insert(0); - let set = ExternalValidatorSet { network, session: Session(session_num) }; - - // Don't double-decide a set that's already pending keys - if self.pending_keys.contains_key(&set) { - return None; - } - - // Pick 1..=min(3, validators.len()) random validators for this set - let max_count = self.validators.len().min(3); - let count = - usize::try_from((OsRng.next_u64() % u64::try_from(max_count).unwrap()) + 1).unwrap(); - - // Shuffle-pick by swapping from a clone - let mut pool = self.validators.clone(); - let mut chosen = Vec::with_capacity(count); - for _ in 0 .. count { - let i = usize::try_from(OsRng.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap(); - chosen.push(pool.swap_remove(i)); - } - - self.pending_keys.insert(set, chosen.clone()); - - let validators_with_shares: Vec<(SeraiAddress, KeyShares)> = - chosen.into_iter().map(|v| (v, KeyShares::ONE)).collect(); - - Some(set_decided_event( - ValidatorSet { network: NetworkId::External(network), session: Session(session_num) }, - validators_with_shares, - )) - } - - /// Generate a random SetKeys event for a pending (decided but not yet keyed) set. - fn random_set_keys(&mut self) -> Option { - if self.pending_keys.is_empty() { - return None; - } - - let keys: Vec = self.pending_keys.keys().copied().collect(); - let i = usize::try_from(OsRng.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); - let set = keys[i]; - // Remove from pending - the task will Validators::take it - self.pending_keys.remove(&set); - - // Advance session for this network so the next SetDecided gets session+1 - *self.next_session.entry(set.network).or_insert(0) += 1; - - let (keypair, public) = random_keypair(&mut OsRng); - self.keypairs.insert(public.0, keypair); - let external_key = random_external_key(&mut OsRng); - let key_pair = KeyPair(public, external_key); - - Some(Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair })) - } - - /// Generate a random BurnWithInstruction event. - fn random_burn(&mut self) -> Event { - let mut burn_address = SeraiAddress([0u8; 32]); - burn_address.0[0 .. 8].copy_from_slice(&OsRng.next_u64().to_le_bytes()); - burn_with_instruction_event(burn_address) - } - - /// Generate random events for a single block. - fn generate_block_events(&mut self) -> Vec> { - let num_events = OsRng.next_u64() % 8; // 0..=7 events per block - if num_events == 0 { - return vec![]; - } - - let mut alloc_count = 0u64; - let mut dealloc_count = 0u64; - let mut set_decided_count = 0u64; - let mut set_keys_count = 0u64; - let mut burn_count = 0u64; - - for _ in 0 .. num_events { - match OsRng.next_u64() % 100 { - 0 ..= 35 => alloc_count += 1, - 36 ..= 55 => dealloc_count += 1, - 56 ..= 70 => set_decided_count += 1, - 71 ..= 85 => set_keys_count += 1, - 86 ..= 99 => burn_count += 1, - _ => unreachable!(), - } - } - - let mut events = Vec::new(); - - // Update the stakes - for _ in 0 .. alloc_count { - events.push(self.random_allocation()); - } - for _ in 0 .. dealloc_count { - if let Some(e) = self.random_deallocation() { - events.push(e); - } - } - - // Handle decided sets - for _ in 0 .. set_decided_count { - if let Some(e) = self.random_set_decided() { - events.push(e); - } - } - - // Handle declarations of the latest set - for _ in 0 .. set_keys_count { - if let Some(event) = self.random_set_keys() { - events.push(event); - } - } - - // Handle burn with instruction events (makes block non-notable if not already notable) - for _ in 0 .. burn_count { - events.push(self.random_burn()); - } - - // Shuffle the events to test order-independence - for i in (1 .. events.len()).rev() { - let j = usize::try_from(OsRng.next_u64() % u64::try_from(i + 1).unwrap()).unwrap(); - events.swap(i, j); - } - - if events.is_empty() { - vec![] - } else { - vec![events] - } - } - - /// Generate multiple blocks of random events. - pub(super) fn generate_blocks(&mut self, count: usize) -> Vec>> { - let mut blocks = Vec::with_capacity(count); - for _ in 0 .. count { - blocks.push(self.generate_block_events()); - } - blocks - } -} - #[tokio::test] async fn deallocating_zero_is_a_noop() { - serai_env::init_logger(); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; let validator = random_serai_address(&mut OsRng); let network = NetworkId::External(ExternalNetworkId::Bitcoin); { // Block 0: allocate 0 stake to the validator - serai.make_block(0, vec![vec![allocation_event(validator, network, 0)]]).await; + shim.make_block(0, vec![vec![allocation_event(validator, network, 0)]]).await; // Block 1: deallocate 0 from the same validator - serai.make_block(1, vec![vec![deallocation_event(validator, network, 0)]]).await; + shim.make_block(1, vec![vec![deallocation_event(validator, network, 0)]]).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -628,9 +336,9 @@ async fn deallocating_zero_is_a_noop() { { let amount = OsRng.next_u64(); // Block 2: allocate stake to the validator - serai.make_block(2, vec![vec![allocation_event(validator, network, amount)]]).await; + shim.make_block(2, vec![vec![allocation_event(validator, network, amount)]]).await; // Block 3: deallocate 0 from the same validator - serai.make_block(3, vec![vec![deallocation_event(validator, network, 0)]]).await; + shim.make_block(3, vec![vec![deallocation_event(validator, network, 0)]]).await; let mut task = task_test.into_task(); TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; @@ -652,9 +360,9 @@ async fn fuzzed_event_processing() { serai_env::log::info!("Fuzz test: {} blocks, {} validators", num_blocks, fuzzer.validators.len(),); - let (serai, task_test) = setup_mock_test().await; + let (shim, task_test) = setup_mock_test().await; for (i, events) in blocks.into_iter().enumerate() { - serai.make_block(u64::try_from(i).unwrap(), events).await; + shim.make_block(u64::try_from(i).unwrap(), events).await; } let mut task = task_test.into_task(); diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index ba2cd7f03..633ed3483 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -13,24 +13,40 @@ mod cosigning; #[cfg(test)] mod full_stack; -use std::sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, +pub use std::{ + collections::HashMap, + sync::{ + Arc, + atomic::{AtomicBool, AtomicUsize, Ordering}, + }, + time::{Duration, Instant}, }; -use rand::{CryptoRng, Rng, RngCore}; +pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use rand::{CryptoRng, Rng, RngCore, seq::SliceRandom}; +pub use rand_core::OsRng; -use serai_shim_rpc::{SeraiShimRpc, ShimState}; -use serai_client_serai::{ +pub use serai_db::{Db, DbTxn, MemDb}; +pub use serai_shim_rpc::{*, event_fuzzer::*}; +pub use serai_abi::validator_sets::Event; +pub use serai_client_serai::{ Serai, abi::primitives::{ - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, + address::SeraiAddress, balance::*, coin::*, crypto::*, instructions::*, network_id::*, + validator_sets::*, }, }; -pub(crate) use serai_task::test_helpers::{IntoTask, TaskTest}; +pub use serai_task::{ + ContinuallyRan, Task, + test_helpers::{IntoTask, TaskTest}, +}; +pub use serai_primitives::test_helpers::*; +pub use serai_cosign_types::{ + SignedCosign, + tests::{sign_cosign, random_cosign, random_cosign_intent}, +}; -use crate::RequestNotableCosigns; +use crate::{GlobalSession, RequestNotableCosigns}; #[derive(Clone)] pub(crate) struct TestRequest { @@ -69,13 +85,13 @@ impl RequestNotableCosigns for TestRequest { } /// Create a [`SeraiShimRpc`] and a [`Arc`] to use it. -async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { +pub(crate) async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { let shim_serai = SeraiShimRpc::start(ShimState::default()).await; let serai = Arc::new(Serai::new(shim_serai.url()).unwrap()); (shim_serai, serai) } -pub(crate) use serai_cosign_types::tests::random_global_session; +pub use serai_cosign_types::tests::random_external_network_id; /// For whe external validator set does not alter or affect the behavior of the functions being tested /// this can be used just as a default value any time @@ -83,11 +99,19 @@ pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } } pub(crate) fn random_validator_set(rng: &mut R) -> ExternalValidatorSet { - let network = match rng.gen_range(0u8 ..= 2) { - 0 => ExternalNetworkId::Bitcoin, - 1 => ExternalNetworkId::Ethereum, - 2 => ExternalNetworkId::Monero, - _ => unreachable!(), - }; - ExternalValidatorSet { network, session: Session(rng.gen()) } + ExternalValidatorSet { network: random_external_network_id(rng), session: Session(rng.gen()) } +} + +/// Build a single-network [`GlobalSession`] from the given components. +pub(crate) fn build_global_session( + set: ExternalValidatorSet, + public: Public, + stake: u64, + start_block_number: u64, +) -> GlobalSession { + let mut keys = HashMap::new(); + keys.insert(set.network, public); + let mut stakes = HashMap::new(); + stakes.insert(set.network, stake); + GlobalSession { start_block_number, sets: vec![set], keys, stakes, total_stake: stake } } diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 54cd6f22c..1e7a19877 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -9,38 +9,34 @@ pub fn sign_cosign(cosign: Cosign, keypair: &schnorrkel::Keypair) -> SignedCosig } #[cfg(test)] -use rand_core::{OsRng, RngCore}; -#[cfg(test)] -use serai_primitives::test_helpers::{random_block_hash, random_keypair}; +use rand_core::OsRng; +#[cfg(any(test, feature = "test-helpers"))] +use rand_core::RngCore; +#[cfg(any(test, feature = "test-helpers"))] +pub use serai_primitives::test_helpers::random_global_session; +#[cfg(any(test, feature = "test-helpers"))] +use serai_primitives::test_helpers::random_block_hash; #[cfg(test)] -use crate::{CosignIntent, ExternalNetworkId, Public}; - -/// Generate a random 32-byte array for testing. +use serai_primitives::test_helpers::random_keypair; #[cfg(any(test, feature = "test-helpers"))] -pub fn random_bytes_32(rng: &mut (impl rand_core::RngCore + rand_core::CryptoRng)) -> [u8; 32] { - let mut bytes = [0u8; 32]; - rng.fill_bytes(&mut bytes); - bytes -} - -/// Generate a random global session ID for testing. +use crate::ExternalNetworkId; #[cfg(any(test, feature = "test-helpers"))] -pub fn random_global_session( - rng: &mut (impl rand_core::RngCore + rand_core::CryptoRng), -) -> [u8; 32] { - random_bytes_32(rng) -} - +use crate::CosignIntent; #[cfg(test)] -fn random_external_network_id( +use crate::Public; + +/// Generate a random [`ExternalNetworkId`] for testing. +#[cfg(any(test, feature = "test-helpers"))] +pub fn random_external_network_id( rng: &mut (impl RngCore + rand_core::CryptoRng), ) -> ExternalNetworkId { let all: Vec<_> = ExternalNetworkId::all().collect(); all[(rng.next_u32() as usize) % all.len()] } -#[cfg(test)] -fn random_cosign(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> Cosign { +/// Generate a random [`Cosign`] for testing. +#[cfg(any(test, feature = "test-helpers"))] +pub fn random_cosign(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> Cosign { Cosign { global_session: random_global_session(rng), block_number: rng.next_u64(), @@ -49,8 +45,9 @@ fn random_cosign(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> Cosign { } } -#[cfg(test)] -fn random_cosign_intent(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> CosignIntent { +/// Generate a random [`CosignIntent`] for testing. +#[cfg(any(test, feature = "test-helpers"))] +pub fn random_cosign_intent(rng: &mut (impl RngCore + rand_core::CryptoRng)) -> CosignIntent { CosignIntent { global_session: random_global_session(rng), block_number: rng.next_u64(), diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 87376d5b3..83412a1a3 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -94,7 +94,7 @@ fn spawn_cosigning( loop { // Intake our own cosigns match Cosigning::::latest_cosigned_block_number(&db) { - Ok(Some(latest_acknowledged_block)) => { + Ok(Some(latest_cosigned_block_number)) => { let mut txn = db.txn(); // The cosigns we prior tried to intake yet failed to let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]); @@ -106,7 +106,7 @@ fn spawn_cosigning( let mut erroneous = vec![]; for cosign in cosigns { // If this cosign is stale, move on - if cosign.cosign.block_number <= latest_acknowledged_block { + if cosign.cosign.block_number <= latest_cosigned_block_number { continue; } diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 617c0cb85..1ec5b1f1c 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -2,27 +2,39 @@ use rand_core::{RngCore, CryptoRng}; -use crate::{BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::{Public, ExternalKey}}; +use crate::{ + BlockHash, + address::{SeraiAddress, ExternalAddress}, + crypto::{Public, ExternalKey}, +}; + +/// Generate a random 32-byte array. +pub fn random_bytes_32(rng: &mut R) -> [u8; 32] { + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes); + bytes +} + +/// Generate a random 64-byte array. +pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { + let mut bytes = [0u8; 64]; + rng.fill_bytes(&mut bytes); + bytes +} /// Generate a random [`ExternalAddress`]. pub fn random_external_address(rng: &mut R) -> ExternalAddress { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - ExternalAddress::try_from(key.to_vec()).unwrap() + ExternalAddress::try_from(random_bytes_32(rng).to_vec()).unwrap() } /// Generate a random [`SeraiAddress`]. pub fn random_serai_address(rng: &mut R) -> SeraiAddress { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - SeraiAddress(key) + SeraiAddress(random_bytes_32(rng)) } /// Generate a random [`Public`]. pub fn random_public(rng: &mut R) -> Public { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - Public(key) + Public(random_bytes_32(rng)) } /// Generate a random schnorrkel keypair and its [`Public`] wrapper. @@ -34,14 +46,15 @@ pub fn random_keypair(rng: &mut R) -> (schnorrkel::Keypa /// Generate a random [`ExternalKey`]. pub fn random_external_key(rng: &mut R) -> ExternalKey { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - ExternalKey(key.to_vec().try_into().unwrap()) + ExternalKey(random_bytes_32(rng).to_vec().try_into().unwrap()) } /// Generate a random [`BlockHash`]. pub fn random_block_hash(rng: &mut R) -> BlockHash { - let mut hash = [0; 32]; - rng.fill_bytes(&mut hash); - BlockHash(hash) + BlockHash(random_bytes_32(rng)) +} + +/// Generate a random global session ID (`[u8; 32]`). +pub fn random_global_session(rng: &mut R) -> [u8; 32] { + random_bytes_32(rng) } diff --git a/tests/shim-rpc/Cargo.toml b/tests/shim-rpc/Cargo.toml index fe263efe0..a7f640883 100644 --- a/tests/shim-rpc/Cargo.toml +++ b/tests/shim-rpc/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/shim-rpc" authors = ["Luke Parker ", "rafael_xmr "] edition = "2021" -rust-version = "1.85" +rust-version = "1.91" publish = false [package.metadata.docs.rs] @@ -16,6 +16,9 @@ rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true +[features] +test-helpers = ["schnorrkel", "serai-primitives"] + [dependencies] jsonrpsee = { version = "0.24", default-features = false, features = ["server"] } tokio = { version = "1", default-features = false } @@ -29,5 +32,11 @@ blake2 = { version = "0.11.0-rc.0", default-features = false } serde = { version = "1", default-features = false } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk" } +schnorrkel = { version = "0.11", default-features = false, features = ["std"], optional = true } +serai-primitives = { path = "../../substrate/primitives", features = ["test-helpers"], optional = true } + [dev-dependencies] +rand = { version = "0.8", default-features = false } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } +serai-primitives = { path = "../../substrate/primitives", features = ["test-helpers"] } serai-client-serai = { path = "../../substrate/client/serai" } diff --git a/tests/shim-rpc/src/builder.rs b/tests/shim-rpc/src/builder.rs index 1ea285957..53fde3783 100644 --- a/tests/shim-rpc/src/builder.rs +++ b/tests/shim-rpc/src/builder.rs @@ -28,14 +28,13 @@ impl SeraiShimRpcBuilder { /// Build and start the shim RPC node. pub async fn build(self) -> SeraiShimRpc { - let mut sim_state = ShimState::default(); + let mut shim_state = ShimState::default(); for (i, events) in self.blocks.into_iter().enumerate() { - #[expect(clippy::as_conversions)] - let number = (i as u64) + 1; - sim_state.make_block(number, events); + let number = u64::try_from(i).unwrap() + 1; + shim_state.make_block(number, events); } - SeraiShimRpc::start(sim_state).await + SeraiShimRpc::start(shim_state).await } } diff --git a/tests/shim-rpc/src/event_fuzzer.rs b/tests/shim-rpc/src/event_fuzzer.rs new file mode 100644 index 000000000..0a3212586 --- /dev/null +++ b/tests/shim-rpc/src/event_fuzzer.rs @@ -0,0 +1,248 @@ +//! Random event, state, and block generator for fuzz testing. + +use std::collections::HashMap; + +use rand_core::{OsRng, RngCore}; + +use serai_abi::{ + Event, validator_sets, + primitives::{ + address::SeraiAddress, + crypto::KeyPair, + network_id::{ExternalNetworkId, NetworkId}, + validator_sets::{ExternalValidatorSet, KeyShares, Session, ValidatorSet}, + }, +}; +use serai_primitives::test_helpers::{ + random_external_address, random_external_key, random_keypair, random_serai_address, +}; + +use crate::test_helpers::*; + +/// Random event, state, and block generator. +pub struct EventFuzzer { + /// Available validator addresses. + pub validators: Vec, + /// All networks. + networks: Vec, + /// Running stake ledger: `(network, validator) -> accumulated_stake`. + stakes: HashMap<(ExternalNetworkId, SeraiAddress), u64>, + /// Sets that have been decided but have not yet set their keys. + pending_keys: HashMap>, + /// Next session number per network. + pub next_session: HashMap, + /// Keypairs indexed by public key bytes, for signing cosigns. + pub keypairs: HashMap<[u8; 32], schnorrkel::Keypair>, +} + +impl EventFuzzer { + pub fn new() -> Self { + // OsRng.next_u64() % 17 = 0..16, + 4 means from 4..20 validators per test + let num_validators = usize::try_from((OsRng.next_u64() % 17) + 4).unwrap(); + + let validators: Vec = + (0 .. num_validators).map(|_| random_serai_address(&mut OsRng)).collect(); + + let networks: Vec = NetworkId::all().collect(); + + Self { + validators, + networks, + stakes: HashMap::new(), + pending_keys: HashMap::new(), + next_session: HashMap::new(), + keypairs: HashMap::new(), + } + } + + /// Pick a random element from a slice. + fn pick<'a, T>(&mut self, slice: &'a [T]) -> &'a T { + let i = OsRng.next_u64() % u64::try_from(slice.len()).unwrap(); + &slice[usize::try_from(i).unwrap()] + } + + /// Generate a random amount using a weighted distribution. + fn random_amount(&mut self) -> u64 { + match OsRng.next_u64() % 100 { + 0 ..= 24 => (OsRng.next_u64() % 10) + 1, + 25 ..= 59 => (OsRng.next_u64() % 990) + 11, + 60 ..= 84 => (OsRng.next_u64() % 99_000) + 1_001, + _ => (OsRng.next_u64() % 9_900_000) + 100_001, + } + } + + /// Generate a random allocation event. + fn random_allocation(&mut self) -> Event { + let validator = *self.pick(&self.validators.clone()); + let network = *self.pick(&self.networks.clone()); + let amount = self.random_amount(); + if let Ok(ext) = ExternalNetworkId::try_from(network) { + *self.stakes.entry((ext, validator)).or_default() += amount; + } + allocation_event(validator, network, amount) + } + + /// Generate a random deallocation event. Returns `None` if no validator has stake. + fn random_deallocation(&mut self) -> Option { + // ~25% chance of generating a Serai deallocation + if OsRng.next_u64() % 4 == 0 { + let validator = *self.pick(&self.validators.clone()); + let amount = self.random_amount(); + return Some(deallocation_event(validator, NetworkId::Serai, amount)); + } + + let candidates: Vec<((ExternalNetworkId, SeraiAddress), u64)> = self + .stakes + .iter() + .filter(|(_v, &stake)| stake > 0) + .map(|(&validator, &stake)| (validator, stake)) + .collect(); + if candidates.is_empty() { + return None; + } + let &((network, validator), current_stake) = self.pick(&candidates); + // Use weighted amount, clamped to current_stake so we don't underflow + let amount = self.random_amount().min(current_stake).max(1); + *self.stakes.entry((network, validator)).or_default() -= amount; + Some(deallocation_event(validator, NetworkId::External(network), amount)) + } + + /// Generate a random SetDecided event. + fn random_set_decided(&mut self) -> Option { + let external_networks: Vec = + self.networks.iter().copied().filter_map(|n| ExternalNetworkId::try_from(n).ok()).collect(); + let network = *self.pick(&external_networks); + let session_num = *self.next_session.entry(network).or_insert(0); + let set = ExternalValidatorSet { network, session: Session(session_num) }; + + // Don't double-decide a set that's already pending keys + if self.pending_keys.contains_key(&set) { + return None; + } + + // Pick 1..=min(3, validators.len()) random validators for this set + let max_count = self.validators.len().min(3); + let count = + usize::try_from((OsRng.next_u64() % u64::try_from(max_count).unwrap()) + 1).unwrap(); + + // Shuffle-pick by swapping from a clone + let mut pool = self.validators.clone(); + let mut chosen = Vec::with_capacity(count); + for _ in 0 .. count { + let i = usize::try_from(OsRng.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap(); + chosen.push(pool.swap_remove(i)); + } + + self.pending_keys.insert(set, chosen.clone()); + + let validators_with_shares: Vec<(SeraiAddress, KeyShares)> = + chosen.into_iter().map(|v| (v, KeyShares::ONE)).collect(); + + Some(set_decided_event( + ValidatorSet { network: NetworkId::External(network), session: Session(session_num) }, + validators_with_shares, + )) + } + + /// Generate a random SetKeys event for a pending (decided but not yet keyed) set. + fn random_set_keys(&mut self) -> Option { + if self.pending_keys.is_empty() { + return None; + } + + let keys: Vec = self.pending_keys.keys().copied().collect(); + let i = usize::try_from(OsRng.next_u64() % u64::try_from(keys.len()).unwrap()).unwrap(); + let set = keys[i]; + // Remove from pending + self.pending_keys.remove(&set); + + // Advance session for this network so the next SetDecided gets session+1 + *self.next_session.entry(set.network).or_insert(0) += 1; + + let (keypair, public) = random_keypair(&mut OsRng); + self.keypairs.insert(public.0, keypair); + let external_key = random_external_key(&mut OsRng); + let key_pair = KeyPair(public, external_key); + + Some(Event::ValidatorSets(validator_sets::Event::SetKeys { set, key_pair })) + } + + /// Generate a random BurnWithInstruction event. + fn random_burn(&mut self) -> Event { + burn_with_instruction_event( + random_serai_address(&mut OsRng), + random_external_address(&mut OsRng), + self.random_amount(), + ) + } + + /// Generate random events for a single block. + fn generate_block_events(&mut self) -> Vec> { + let num_events = OsRng.next_u64() % 8; // 0..=7 events per block + if num_events == 0 { + return vec![]; + } + + let mut alloc_count = 0u64; + let mut dealloc_count = 0u64; + let mut set_decided_count = 0u64; + let mut set_keys_count = 0u64; + let mut burn_count = 0u64; + + for _ in 0 .. num_events { + match OsRng.next_u64() % 100 { + 0 ..= 35 => alloc_count += 1, + 36 ..= 55 => dealloc_count += 1, + 56 ..= 70 => set_decided_count += 1, + 71 ..= 85 => set_keys_count += 1, + 86 ..= 99 => burn_count += 1, + _ => unreachable!(), + } + } + + let mut events = Vec::new(); + + for _ in 0 .. alloc_count { + events.push(self.random_allocation()); + } + for _ in 0 .. dealloc_count { + if let Some(e) = self.random_deallocation() { + events.push(e); + } + } + for _ in 0 .. set_decided_count { + if let Some(e) = self.random_set_decided() { + events.push(e); + } + } + for _ in 0 .. set_keys_count { + if let Some(event) = self.random_set_keys() { + events.push(event); + } + } + for _ in 0 .. burn_count { + events.push(self.random_burn()); + } + + // Shuffle the events to test order-independence + for i in (1 .. events.len()).rev() { + let j = usize::try_from(OsRng.next_u64() % u64::try_from(i + 1).unwrap()).unwrap(); + events.swap(i, j); + } + + if events.is_empty() { + vec![] + } else { + vec![events] + } + } + + /// Generate multiple blocks of random events. + pub fn generate_blocks(&mut self, count: usize) -> Vec>> { + let mut blocks = Vec::with_capacity(count); + for _ in 0 .. count { + blocks.push(self.generate_block_events()); + } + blocks + } +} diff --git a/tests/shim-rpc/src/lib.rs b/tests/shim-rpc/src/lib.rs index 0f86b6a2f..a824a9ed4 100644 --- a/tests/shim-rpc/src/lib.rs +++ b/tests/shim-rpc/src/lib.rs @@ -3,13 +3,20 @@ pub mod state; pub mod rpc; pub mod builder; +pub mod test_helpers; + +#[cfg(any(test, feature = "test-helpers"))] +pub mod event_fuzzer; pub use state::*; pub use builder::SeraiShimRpcBuilder; -use std::net::SocketAddr; +use core::mem; +use std::{env, net::SocketAddr, sync::Arc}; + +use jsonrpsee::server::ServerBuilder; +use tokio::sync::RwLock; -use jsonrpsee::server::ServerHandle; use serai_abi::{ primitives::{BlockHash, merkle::IncrementalUnbalancedMerkleTree}, Event, @@ -20,10 +27,12 @@ use serai_abi::{ pub struct SeraiShimRpc { url: String, state: SharedState, - _handle: ServerHandle, } impl SeraiShimRpc { + /// The block number of the first block in the chain. + pub const STARTING_BLOCK_NUMBER: u64 = 0; + /// Create a builder for configuring and starting a shim RPC node. pub fn builder() -> SeraiShimRpcBuilder { SeraiShimRpcBuilder::new() @@ -31,18 +40,19 @@ impl SeraiShimRpc { /// Start a shim RPC node with the given initial state, binding to an ephemeral port. pub async fn start(initial_state: ShimState) -> Self { - let state = std::sync::Arc::new(tokio::sync::RwLock::new(initial_state)); + let state = Arc::new(RwLock::new(initial_state)); let rpc_module = rpc::build_rpc_module(state.clone()).expect("failed to build RPC module"); - let server = jsonrpsee::server::ServerBuilder::default() + let server = ServerBuilder::default() .build(SocketAddr::from(([127, 0, 0, 1], 0))) .await .expect("failed to bind shim RPC node server"); let addr = server.local_addr().expect("server should have a local address"); let handle = server.start(rpc_module); + mem::forget(handle); - Self { url: format!("http://{addr}"), state, _handle: handle } + Self { url: format!("http://{addr}"), state } } /// The HTTP URL this shim is listening on. @@ -62,7 +72,7 @@ impl SeraiShimRpc { pub async fn add_block_with_events(&self, events: Vec>) -> BlockHash { let mut state = self.state.write().await; let Some(latest_block) = state.blocks_by_number.keys().copied().max() else { - return state.make_block(0, events); + return state.make_block(Self::STARTING_BLOCK_NUMBER, events); }; let number = latest_block + 1; state.make_block(number, events) @@ -145,9 +155,9 @@ impl SeraiShimRpc { /// Set the probability (0–100) that any RPC request randomly fails. /// /// 0 disables fuzzing (the default), 100 fails every request. - /// If the `RUST_TEST_NO_RPC_FUZZ` env var is set, the rate is forced to 0. + /// If the `SERAI_SHIM_RPC_NO_ERROR` env var is set, the rate is forced to 0. pub async fn set_failure_rate(&self, percent: u8) { - let effective = if std::env::var("RUST_TEST_NO_RPC_FUZZ").is_ok() { 0 } else { percent }; + let effective = if env::var("SERAI_SHIM_RPC_NO_ERROR").is_ok() { 0 } else { percent }; self.state.write().await.errors.failure_rate = effective; } diff --git a/tests/shim-rpc/src/rpc.rs b/tests/shim-rpc/src/rpc.rs index b261a0cab..ef18d9a8c 100644 --- a/tests/shim-rpc/src/rpc.rs +++ b/tests/shim-rpc/src/rpc.rs @@ -1,15 +1,19 @@ -use jsonrpsee::{types::error::ErrorObjectOwned, RpcModule}; +use jsonrpsee::{ + RpcModule, + types::{error::ErrorObjectOwned, params::Params}, +}; +use serde::Deserialize; use serai_abi::{ Event, primitives::{ BlockHash, network_id::{ExternalNetworkId, NetworkId}, - validator_sets::{ExternalValidatorSet, Session}, + validator_sets::{ExternalValidatorSet, Session, ValidatorSet}, }, }; -use crate::state::SharedState; +use crate::state::{SharedState, ShimState}; /// Typed RPC errors mirroring `substrate/node/src/rpc/utils.rs`. enum Error { @@ -44,15 +48,12 @@ impl From for ErrorObjectOwned { /// Mirrors `substrate/node/src/rpc/utils.rs`: /// - `{ "block": "hex_hash" }` = lookup by hash /// - `{ "block": 123 }` = lookup by number -fn resolve_block_hash( - params: &jsonrpsee::types::params::Params, - state: &crate::state::ShimState, -) -> Result, Error> { - #[derive(sp_core::serde::Deserialize)] +fn resolve_block_hash(params: &Params, state: &ShimState) -> Result, Error> { + #[derive(Deserialize)] struct BlockByHash { block: String, } - #[derive(sp_core::serde::Deserialize)] + #[derive(Deserialize)] struct BlockByNumber { block: u64, } @@ -86,8 +87,8 @@ fn network_from_str(network: &str) -> Result { }) } -fn parse_network(params: &jsonrpsee::types::params::Params) -> Result { - #[derive(sp_core::serde::Deserialize)] +fn parse_network(params: &Params) -> Result { + #[derive(Deserialize)] struct Network { network: String, } @@ -96,8 +97,8 @@ fn parse_network(params: &jsonrpsee::types::params::Params) -> Result Result { - #[derive(sp_core::serde::Deserialize)] +fn parse_set(params: &Params) -> Result { + #[derive(Deserialize)] struct Set { network: String, session: u32, @@ -106,11 +107,9 @@ fn parse_set(params: &jsonrpsee::types::params::Params) -> Result>) -> BlockHash { let block = Block { header: Header::V1(HeaderV1 { number, builds_upon: self.builds_upon.clone().calculate(BLOCK_BRANCH_TAG), proposer: SeraiAddress([0; 32]), - #[expect(clippy::cast_possible_truncation, clippy::as_conversions)] - unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() - as u64, + unix_time_in_millis: u64::try_from( + SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis(), + ) + .unwrap(), transactions_commitment: UnbalancedMerkleTree::EMPTY, events_commitment: UnbalancedMerkleTree::EMPTY, consensus_commitment: [0; 32], @@ -151,12 +151,12 @@ impl ShimState { let block = Block { header: Header::V1(HeaderV1 { number, - // Use an empty tree: this will NOT match what the task expects builds_upon: IncrementalUnbalancedMerkleTree::new().calculate(BLOCK_BRANCH_TAG), proposer: SeraiAddress([0; 32]), - #[expect(clippy::cast_possible_truncation, clippy::as_conversions)] - unix_time_in_millis: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() - as u64, + unix_time_in_millis: u64::try_from( + SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis(), + ) + .unwrap(), transactions_commitment: UnbalancedMerkleTree::EMPTY, events_commitment: UnbalancedMerkleTree::EMPTY, consensus_commitment: [0; 32], @@ -166,7 +166,7 @@ impl ShimState { let block_hash = block.header.hash(); - // Register the block but do NOT update builds_upon + // Register the block but do not update builds_upon self.block_number_by_hash.insert(block_hash, number); self.blocks_by_number.insert(number, block); self.events_by_hash.insert(block_hash, events); diff --git a/tests/shim-rpc/src/test_helpers.rs b/tests/shim-rpc/src/test_helpers.rs new file mode 100644 index 000000000..900ca8c63 --- /dev/null +++ b/tests/shim-rpc/src/test_helpers.rs @@ -0,0 +1,39 @@ +//! Test helper functions for constructing common Serai ABI events. + +use serai_abi::{ + *, + primitives::{ + address::*, balance::*, coin::*, instructions::*, network_id::*, validator_sets::*, + }, +}; + +pub fn set_decided_event(set: ValidatorSet, validators: Vec<(SeraiAddress, KeyShares)>) -> Event { + Event::ValidatorSets(validator_sets::Event::SetDecided { set, validators }) +} + +pub fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { + Event::ValidatorSets(validator_sets::Event::Allocation { + validator, + network, + amount: Amount(amount), + }) +} + +pub fn deallocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { + Event::ValidatorSets(validator_sets::Event::Deallocation { + validator, + network, + amount: Amount(amount), + timeline: DeallocationTimeline::Immediate, + }) +} + +pub fn burn_with_instruction_event(from: SeraiAddress, to: ExternalAddress, amount: u64) -> Event { + Event::Coins(coins::Event::BurnWithInstruction { + from, + instruction: OutInstructionWithBalance { + instruction: OutInstruction::Transfer(to), + balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(amount) }, + }, + }) +} diff --git a/tests/shim-rpc/tests/integration.rs b/tests/shim-rpc/tests/integration.rs index c77c71574..4443713a2 100644 --- a/tests/shim-rpc/tests/integration.rs +++ b/tests/shim-rpc/tests/integration.rs @@ -1,266 +1,249 @@ -use serai_shim_rpc::{SeraiShimRpc, SeraiShimRpcBuilder}; +use rand::{Rng, RngCore}; +use rand_core::OsRng; +use serai_abi::primitives::test_helpers::random_serai_address; +use serai_shim_rpc::{*, test_helpers::*}; use serai_client_serai::{ - Serai, + *, abi::{ - Event, - primitives::{ - address::SeraiAddress, - balance::Amount, - network_id::{ExternalNetworkId, NetworkId}, - validator_sets::{KeyShares, Session, ValidatorSet}, - }, - validator_sets as vs_mod, + primitives::{balance::*, network_id::*, validator_sets::*}, + validator_sets::*, }, }; -fn allocation_event(validator: SeraiAddress, network: NetworkId, amount: u64) -> Event { - Event::ValidatorSets(vs_mod::Event::Allocation { validator, network, amount: Amount(amount) }) -} - -fn set_decided_event(network: NetworkId, session: u32, validator: SeraiAddress) -> Event { - Event::ValidatorSets(vs_mod::Event::SetDecided { - set: ValidatorSet { network, session: Session(session) }, - validators: vec![(validator, KeyShares::ONE)], - }) -} - #[tokio::test] async fn test_basic_block_and_number() { - let sim = SeraiShimRpcBuilder::new() + let shim = SeraiShimRpcBuilder::new() .with_block(vec![vec![]]) .with_block(vec![vec![]]) .with_block(vec![vec![]]) .build() .await; - let client = Serai::new(sim.url()).unwrap(); - - // Latest finalized block number should be 3 - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 3); + let serai = Serai::new(shim.url()).unwrap(); - // Block by number should return a valid block - let block = client.block_by_number(1).await.unwrap(); - assert!(block.is_some()); - let block = block.unwrap(); - assert_eq!(block.header.number(), 1); + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 3, "latest finalized block number should be 3"); - // Block 2 has number 2 - let block2 = client.block_by_number(2).await.unwrap().unwrap(); - assert_eq!(block2.header.number(), 2); + let block = serai.block_by_number(1).await.unwrap(); + assert!(block.is_some(), "block 1 should exist"); + assert_eq!(block.unwrap().header.number(), 1, "block 1 should have number 1"); - // Block 3 has number 3 - let block3 = client.block_by_number(3).await.unwrap().unwrap(); - assert_eq!(block3.header.number(), 3); + let block2 = serai.block_by_number(2).await.unwrap().unwrap(); + assert_eq!(block2.header.number(), 2, "block 2 should have number 2"); - // Non-existent block returns None - let none = client.block_by_number(999).await.unwrap(); - assert!(none.is_none()); + let block3 = serai.block_by_number(3).await.unwrap().unwrap(); + assert_eq!(block3.header.number(), 3, "block 3 should have number 3"); + let none = serai.block_by_number(OsRng.gen_range(5 .. 999)).await.unwrap(); + assert!(none.is_none(), "non-existent block should return None"); } #[tokio::test] async fn test_block_by_hash() { - let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; - - let client = Serai::new(sim.url()).unwrap(); + let shim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + let serai = Serai::new(shim.url()).unwrap(); - // Get block by number, then look it up by hash - let block = client.block_by_number(1).await.unwrap().unwrap(); + let block = serai.block_by_number(1).await.unwrap().unwrap(); let hash = block.header.hash(); - let block_by_hash = client.block(hash).await.unwrap(); - assert!(block_by_hash.is_some()); - assert_eq!(block_by_hash.unwrap().header.number(), 1); - - // is_finalized should return true - let finalized = client.finalized(hash).await.unwrap(); - assert!(finalized); + let block_by_hash = serai.block(hash).await.unwrap(); + assert!(block_by_hash.is_some(), "block lookup by hash should return Some"); + assert_eq!( + block_by_hash.unwrap().header.number(), + 1, + "block looked up by hash should be block 1" + ); + let finalized = serai.finalized(hash).await.unwrap(); + assert!(finalized, "block should be finalized"); } #[tokio::test] async fn test_events_round_trip() { - let validator = SeraiAddress([1u8; 32]); + let validator = random_serai_address(&mut OsRng); let events = vec![vec![ - allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), 1_000_000), - set_decided_event(NetworkId::External(ExternalNetworkId::Bitcoin), 0, validator), + allocation_event(validator, NetworkId::External(ExternalNetworkId::Bitcoin), OsRng.next_u64()), + set_decided_event( + ValidatorSet { + network: NetworkId::External(ExternalNetworkId::Bitcoin), + session: Session(0), + }, + vec![(validator, KeyShares::ONE)], + ), ]]; - let sim = SeraiShimRpcBuilder::new().with_block(events).build().await; + let shim = SeraiShimRpcBuilder::new().with_block(events).build().await; - let client = Serai::new(sim.url()).unwrap(); + let serai = Serai::new(shim.url()).unwrap(); - let block = client.block_by_number(1).await.unwrap().unwrap(); + let block = serai.block_by_number(1).await.unwrap().unwrap(); let hash = block.header.hash(); - let events = client.events(hash).await.unwrap(); + let events = serai.events(hash).await.unwrap(); - // Extract validator_sets events let vs = events.validator_sets(); let vs_events: Vec<_> = vs.events().collect(); - assert_eq!(vs_events.len(), 2); - - // Verify first event is an Allocation - assert!(matches!(vs_events[0], vs_mod::Event::Allocation { .. })); - // Verify second event is a SetDecided - assert!(matches!(vs_events[1], vs_mod::Event::SetDecided { .. })); + assert_eq!(vs_events.len(), 2, "should have 2 validator_sets events"); + assert!(matches!(vs_events[0], Event::Allocation { .. }), "first event should be Allocation"); + assert!(matches!(vs_events[1], Event::SetDecided { .. }), "second event should be SetDecided"); } #[tokio::test] async fn test_dynamic_block_addition() { - let sim = SeraiShimRpc::builder().build().await; - - let client = Serai::new(sim.url()).unwrap(); + let shim = SeraiShimRpc::builder().build().await; - // Initially no blocks - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 0); + let serai = Serai::new(shim.url()).unwrap(); - // Add a block dynamically - let hash = sim.add_block_with_events(vec![vec![]]).await; + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 0, "initially no blocks should exist"); - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 1); + let hash = shim.add_block_with_events(vec![vec![]]).await; - // Look up the block by its hash - let block = client.block(hash).await.unwrap(); - assert!(block.is_some()); - assert_eq!(block.unwrap().header.number(), 1); + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1, "should have 1 block after adding one"); - // Add another - sim.add_block_with_events(vec![vec![]]).await; - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 2); + let block = serai.block(hash).await.unwrap(); + assert!(block.is_some(), "block should be retrievable by hash"); + assert_eq!(block.unwrap().header.number(), 1, "dynamically added block should be block 1"); + shim.add_block_with_events(vec![vec![]]).await; + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 2, "should have 2 blocks after adding another"); } #[tokio::test] async fn test_error_injection() { - let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + let shim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; - let client = Serai::new(sim.url()).unwrap(); + let serai = Serai::new(shim.url()).unwrap(); - // Works normally - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 1); + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1, "should work normally before error injection"); - // Inject error - sim.set_error("blockchain/latest_finalized_block_number", "simulated failure").await; + shim.set_error("blockchain/latest_finalized_block_number", "simulated failure").await; - // Now it should fail - let result = client.latest_finalized_block_number().await; - assert!(result.is_err()); + let result = serai.latest_finalized_block_number().await; + assert!(result.is_err(), "should fail after error injection"); let err_msg = format!("{}", result.unwrap_err()); assert!(err_msg.contains("simulated failure"), "error was: {err_msg}"); - // Clear error - sim.clear_error("blockchain/latest_finalized_block_number").await; - - // Should work again - let latest = client.latest_finalized_block_number().await.unwrap(); - assert_eq!(latest, 1); + shim.clear_error("blockchain/latest_finalized_block_number").await; + let latest = serai.latest_finalized_block_number().await.unwrap(); + assert_eq!(latest, 1, "should work again after clearing error"); } #[tokio::test] async fn test_clear_all_errors() { - let sim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; - - let client = Serai::new(sim.url()).unwrap(); - - // Inject multiple errors - sim.set_error("blockchain/latest_finalized_block_number", "err1").await; - sim.set_error("blockchain/block", "err2").await; - - // Both should fail - client.latest_finalized_block_number().await.unwrap_err(); - client.block_by_number(1).await.unwrap_err(); - - // Clear all - sim.clear_all_errors().await; - - // Both should work - assert_eq!(client.latest_finalized_block_number().await.unwrap(), 1); - assert!(client.block_by_number(1).await.unwrap().is_some()); - + let shim = SeraiShimRpcBuilder::new().with_block(vec![vec![]]).build().await; + + let serai = Serai::new(shim.url()).unwrap(); + + shim.set_error("blockchain/latest_finalized_block_number", "err1").await; + shim.set_error("blockchain/block", "err2").await; + + assert!( + serai.latest_finalized_block_number().await.is_err(), + "latest_finalized should fail with injected error" + ); + assert!( + serai.block_by_number(1).await.is_err(), + "block_by_number should fail with injected error" + ); + + shim.clear_all_errors().await; + + assert_eq!( + serai.latest_finalized_block_number().await.unwrap(), + 1, + "latest_finalized should work after clearing all errors" + ); + assert!( + serai.block_by_number(1).await.unwrap().is_some(), + "block_by_number should work after clearing all errors" + ); } #[tokio::test] async fn test_builds_upon_chain() { - // Verify that blocks form a proper chain via builds_upon - let sim = SeraiShimRpcBuilder::new() + let shim = SeraiShimRpcBuilder::new() .with_block(vec![vec![]]) .with_block(vec![vec![]]) .with_block(vec![vec![]]) .build() .await; - let client = Serai::new(sim.url()).unwrap(); - - let block1 = client.block_by_number(1).await.unwrap().unwrap(); - let block2 = client.block_by_number(2).await.unwrap().unwrap(); - let block3 = client.block_by_number(3).await.unwrap().unwrap(); - - // Each block should have a distinct builds_upon - assert_ne!(block1.header.builds_upon(), block2.header.builds_upon()); - assert_ne!(block2.header.builds_upon(), block3.header.builds_upon()); - - // Each block should have a distinct hash - assert_ne!(block1.header.hash(), block2.header.hash()); - assert_ne!(block2.header.hash(), block3.header.hash()); - + let serai = Serai::new(shim.url()).unwrap(); + + let block1 = serai.block_by_number(1).await.unwrap().unwrap(); + let block2 = serai.block_by_number(2).await.unwrap().unwrap(); + let block3 = serai.block_by_number(3).await.unwrap().unwrap(); + + assert_ne!( + block1.header.builds_upon(), + block2.header.builds_upon(), + "block 1 and 2 should have distinct builds_upon" + ); + assert_ne!( + block2.header.builds_upon(), + block3.header.builds_upon(), + "block 2 and 3 should have distinct builds_upon" + ); + + assert_ne!( + block1.header.hash(), + block2.header.hash(), + "block 1 and 2 should have distinct hashes" + ); + assert_ne!( + block2.header.hash(), + block3.header.hash(), + "block 2 and 3 should have distinct hashes" + ); } #[tokio::test] async fn test_publish_transaction() { - let sim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; + let shim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; - // The simulator stores raw transaction bytes without execution. { - let state = sim.state().read().await; - assert!(state.published_transactions.is_empty()); + let state = shim.state().read().await; + assert!(state.published_transactions.is_empty(), "no transactions should exist initially"); } - // The publish_transaction method on the client requires a real Transaction, - // so we verify the endpoint works by pushing directly to state. { - let mut state = sim.state().write().await; + let mut state = shim.state().write().await; state.published_transactions.push(vec![0xDE, 0xAD]); } { - let state = sim.state().read().await; - assert_eq!(state.published_transactions.len(), 1); - assert_eq!(state.published_transactions[0], vec![0xDE, 0xAD]); + let state = shim.state().read().await; + assert_eq!(state.published_transactions.len(), 1, "should have 1 published transaction"); + assert_eq!(state.published_transactions[0], vec![0xDE, 0xAD], "transaction bytes should match"); } - } #[tokio::test] async fn test_validator_sets_state() { - let sim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; + let shim = SeraiShimRpc::builder().with_block(vec![vec![]]).build().await; - let client = Serai::new(sim.url()).unwrap(); + let serai = Serai::new(shim.url()).unwrap(); - // Set up validator-sets state on the default { - let mut state = sim.state().write().await; + let mut state = shim.state().write().await; let network = NetworkId::External(ExternalNetworkId::Bitcoin); state.default_validator_sets.sessions.insert(network, Session(5)); state.default_validator_sets.stakes.insert(network, Amount(1_000_000)); } - // Query via the real client's State API - let serai_state = client.state().await.unwrap(); + let serai_state = serai.state().await.unwrap(); let session = serai_state.current_session(NetworkId::External(ExternalNetworkId::Bitcoin)).await.unwrap(); - assert_eq!(session, Some(Session(5))); + assert_eq!(session, Some(Session(5)), "current session should be 5"); let stake = serai_state.current_stake(NetworkId::External(ExternalNetworkId::Bitcoin)).await.unwrap(); - assert_eq!(stake, Some(Amount(1_000_000))); - + assert_eq!(stake, Some(Amount(1_000_000)), "current stake should be 1_000_000"); } From 4b933e8b1cf532a552aca6b1c4965a2e2e608117 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Thu, 26 Mar 2026 15:13:36 -0300 Subject: [PATCH 44/71] feat(cosign/tests/full_stack): add dos offline cosigns stall scenario & re-word readme.md --- coordinator/cosign/README.md | 32 +- coordinator/cosign/src/tests/cosigning.rs | 12 +- coordinator/cosign/src/tests/full_stack.rs | 330 ++++++++++++++++++--- tests/shim-rpc/src/event_fuzzer.rs | 54 +++- 4 files changed, 360 insertions(+), 68 deletions(-) diff --git a/coordinator/cosign/README.md b/coordinator/cosign/README.md index 50ce52a6d..deadb62b9 100644 --- a/coordinator/cosign/README.md +++ b/coordinator/cosign/README.md @@ -97,19 +97,25 @@ Any historical Serai validator set may trigger a chain halt by producing an equivocation after their retiry. This requires 67% to be malicious. 34% of the active Serai validator set may also trigger a chain halt. -17% of non-Serai validator sets equivocating causing a halt means 5.67% of -non-Serai validator sets' stake may cause a halt (in an asynchronous -environment fully controlled by the adversary). In a synchronous environment -where the honest stake cannot be split across two candidates, 11.33% of -non-Serai validator sets' stake is required. - -The more practical attack is for one to obtain 5.67% of non-Serai validator -sets' stake, under any network conditions, and simply go offline. This will -take 17% of validator sets offline with it, preventing any cosign commits -from being performed. A fallback protocol where validators individually produce -cosigns, removing the network's horizontal scalability but ensuring liveness, -prevents this, restoring the additional requirements for control of an -asynchronous network or 11.33% of non-Serai validator sets' stake. +17% of non-Serai validator sets equivocating causing a halt means that, +in an asynchronous environment fully controlled by the adversary, 1/3 of +a validator set's stake being malicious may cause a halt, so 1/3 of +17% meaning 5.67% of non-Serai validator sets' stake. In a synchronous +environment where the honest stake cannot be split across two candidates, +2/3 of 17%, or 11.33% of non-Serai validator sets' stake, is required. + +The more practical attack is for one to obtain 1/3 of the stake of a +validator set which holds at most 17% of the total cosigning weight, +meaning it would require at a minimum 5.67% of non-Serai validator sets' +stake under any network conditions, and simply go offline. By making 1/3 +of the keys not available to sign, this ends up taking the whole validator +set's 17% of weight offline, thus preventing any cosign commits from being +performed as the threshold cannot be reached. Note: this does not halt the +protocol, it only stalls it until the set comes back online. A fallback +protocol where validators individually produce cosigns, removing the +network's horizontal scalability but ensuring liveness, prevents this, +restoring the additional requirements for control of an asynchronous +network or 11.33% of non-Serai validator sets' stake. ### TODO diff --git a/coordinator/cosign/src/tests/cosigning.rs b/coordinator/cosign/src/tests/cosigning.rs index d355e5fa8..bed19bf26 100644 --- a/coordinator/cosign/src/tests/cosigning.rs +++ b/coordinator/cosign/src/tests/cosigning.rs @@ -29,14 +29,14 @@ fn random_test_session() -> (TestGlobalSession, schnorrkel::Keypair) { let set = default_test_validator_set(); let (keypair, public) = random_keypair(&mut OsRng); let stake = OsRng.gen_range(1u64 .. u64::MAX / 17); - let gs = build_global_session(set, public, stake, u64::from(set.session.0) + 1); + let global_session = build_global_session(set, public, stake, u64::from(set.session.0) + 1); let session = TestGlobalSession { - start_block_number: gs.start_block_number, - sets: gs.sets, - keys: gs.keys, - stakes: gs.stakes, - total_stake: gs.total_stake, + start_block_number: global_session.start_block_number, + sets: global_session.sets, + keys: global_session.keys, + stakes: global_session.stakes, + total_stake: global_session.total_stake, }; (session, keypair) } diff --git a/coordinator/cosign/src/tests/full_stack.rs b/coordinator/cosign/src/tests/full_stack.rs index 935ceee6b..6cc75af7e 100644 --- a/coordinator/cosign/src/tests/full_stack.rs +++ b/coordinator/cosign/src/tests/full_stack.rs @@ -7,63 +7,104 @@ use crate::{evaluator::*, tests::*, *}; +/// Drain all pending cosign intents from every keyed session the fuzzer knows about. +fn drain_intents( + txn: &mut impl DbTxn, + event_fuzzer: &EventFuzzer, +) -> Vec<(ExternalNetworkId, CosignIntent)> { + let mut intents = Vec::new(); + for network in ExternalNetworkId::all() { + let max_session = event_fuzzer.next_session.get(&network).copied().unwrap_or(0); + for session_num in 0 .. max_session { + let set = ExternalValidatorSet { network, session: Session(session_num) }; + for intent in Cosigning::::intended_cosigns(txn, set) { + intents.push((network, intent)); + } + } + } + intents +} + +/// Sign an intent and intake it. Returns `Ok(())` on success or non-retryable error, +/// `Err((network, intent))` if the intent should be retried later. +fn sign_and_intake( + db: &MemDb, + cosigning: &mut Cosigning, + event_fuzzer: &EventFuzzer, + network: ExternalNetworkId, + intent: CosignIntent, +) -> Result<(), (ExternalNetworkId, CosignIntent)> { + let cosign = intent.into_cosign(network); + let Some(global_session) = GlobalSessions::get(db, intent.global_session) else { + return Err((network, intent)); + }; + let Some(public) = global_session.keys.get(&network) else { return Ok(()) }; + let Some(keypair) = event_fuzzer.keypairs.get(&public.0) else { return Ok(()) }; + let signed = sign_cosign(cosign, keypair); + match cosigning.intake_cosign(&signed) { + Ok(()) => Ok(()), + Err(IntakeCosignError::FutureGlobalSession) | + Err(IntakeCosignError::UnrecognizedGlobalSession) | + Err(IntakeCosignError::NotYetIndexedBlock) => Err((network, intent)), + Err(IntakeCosignError::StaleCosign) => Ok(()), + Err(ref e) => { + serai_env::log::warn!( + "intake_cosign error: block={}, network={network:?}, err={e:?}", + intent.block_number, + ); + Ok(()) + } + } +} + +/// Wrapper for `run_honest_cosigning_capped` with no block cap. +async fn run_honest_cosigning( + db: &MemDb, + cosigning: &mut Cosigning, + event_fuzzer: &EventFuzzer, + should_break: impl FnMut(Option) -> bool, +) { + run_honest_cosigning_capped(db, cosigning, event_fuzzer, should_break, None, &mut Vec::new()) + .await; +} + /// Run the honest cosigning loop: drain intents from all keyed sessions, sign them /// with the EventFuzzer's keypairs, intake them, and repeat until `should_break` returns `true`. /// /// `should_break` is called each iteration with the current `latest_cosigned_block_number`. -async fn run_honest_cosigning( +/// +/// Intents for blocks beyond `max_block` are deferred into `deferred_intents` instead of +/// being signed. This prevents the evaluator from using high-block cosigns submitted during +/// an early phase to advance past the point where a later phase expects a stall. +async fn run_honest_cosigning_capped( db: &MemDb, cosigning: &mut Cosigning, event_fuzzer: &EventFuzzer, mut should_break: impl FnMut(Option) -> bool, + max_block: Option, + deferred_intents: &mut Vec<(ExternalNetworkId, CosignIntent)>, ) { let mut pending_intents: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); loop { { let mut db = db.clone(); let mut txn = db.txn(); - for network in ExternalNetworkId::all() { - let max_session = event_fuzzer.next_session.get(&network).copied().unwrap_or(0); - for session_num in 0 .. max_session { - let set = ExternalValidatorSet { network, session: Session(session_num) }; - let intents = Cosigning::::intended_cosigns(&mut txn, set); - for intent in intents { - pending_intents.push((network, intent)); - } + for (network, intent) in drain_intents(&mut txn, event_fuzzer) { + if max_block.is_some_and(|cap| intent.block_number > cap) { + deferred_intents.push((network, intent)); + } else { + pending_intents.push((network, intent)); } } txn.commit(); } - let mut still_pending = Vec::new(); - for (network, intent) in pending_intents.drain(..) { - let cosign = intent.into_cosign(network); - let Some(gs) = GlobalSessions::get(db, intent.global_session) else { - still_pending.push((network, intent)); - continue; - }; - let Some(public) = gs.keys.get(&network) else { continue }; - let Some(keypair) = event_fuzzer.keypairs.get(&public.0) else { continue }; - let signed = sign_cosign(cosign, keypair); - match cosigning.intake_cosign(&signed) { - Ok(()) => {} - Err(IntakeCosignError::FutureGlobalSession) | - Err(IntakeCosignError::UnrecognizedGlobalSession) | - Err(IntakeCosignError::NotYetIndexedBlock) => { - still_pending.push((network, intent)); - } - Err(IntakeCosignError::StaleCosign) => {} - Err(ref e) => { - serai_env::log::warn!( - "intake_cosign dropped: block={}, network={:?}, err={:?}", - intent.block_number, - network, - e, - ); - } - } - } - pending_intents = still_pending; + pending_intents = pending_intents + .drain(..) + .filter_map(|(network, intent)| { + sign_and_intake(db, cosigning, event_fuzzer, network, intent).err() + }) + .collect(); let latest = match Cosigning::::latest_cosigned_block_number(db) { Ok(Some(n)) => Some(n), @@ -93,7 +134,7 @@ async fn full_stack_fuzzed() { for i in 1 .. iterations + 1 { let num_blocks = OsRng.gen_range(5 .. 20); let mut event_fuzzer = EventFuzzer::new(); - let blocks = event_fuzzer.generate_blocks(num_blocks); + let blocks = event_fuzzer.generate_blocks_with_keygen(num_blocks); serai_env::log::info!( "Starting full-stack fuzz: 0..{} blocks, {} validators ({i}/{iterations})", @@ -145,7 +186,7 @@ async fn equivocation_halts_protocol() { for iteration in 1 ..= iterations { let num_blocks = OsRng.gen_range(5 .. 20); let mut event_fuzzer = EventFuzzer::new(); - let blocks = event_fuzzer.generate_blocks(num_blocks); + let blocks = event_fuzzer.generate_blocks_with_keygen(num_blocks); serai_env::log::info!( "equivocation fuzz: 0..{} blocks, {} validators ({iteration}/{iterations})", @@ -315,13 +356,7 @@ async fn equivocation_halts_protocol() { // and confirm latest_cosigned_block_number is still Err(Faulted). { let mut txn = db.txn(); - for network in ExternalNetworkId::all() { - let max_session = event_fuzzer.next_session.get(&network).copied().unwrap_or(0); - for session_num in 0 .. max_session { - let set = ExternalValidatorSet { network, session: Session(session_num) }; - let _ = Cosigning::::intended_cosigns(&mut txn, set); - } - } + drop(drain_intents(&mut txn, &event_fuzzer)); txn.commit(); } @@ -335,3 +370,204 @@ async fn equivocation_halts_protocol() { ); } } + +/// DoS test modeling the README's "5.67% practical attack": +/// If a set has >= 17% of total non-Serai stake +/// and an attacker controls 1/3 of that set's stake and goes offline, +/// that prevents the set from producing threshold signatures, leaving +/// the remaining sets not able reach the 83% commit threshold, +/// stalling but not halting the protocol. +#[tokio::test] +async fn dos_stall_offline_set() { + serai_env::init_logger(); + + let iterations = 5; + for iteration in 1 ..= iterations { + serai_env::log::info!("dos_stall_offline_set iteration {iteration}/{iterations}"); + + let num_blocks = OsRng.gen_range(10 .. 25); + let mut event_fuzzer = EventFuzzer::new(); + let mut blocks = event_fuzzer.generate_blocks_with_keygen(num_blocks); + + // Ensure at least one block in the latter half has events (a burn), + // so blocks with HasEvents::No don't let the pipeline sail through uncosigned. + let mid = num_blocks / 2; + if blocks[mid ..].iter().all(|b| b.is_empty()) { + let burn_index = mid + (OsRng.next_u64() as usize % (num_blocks - mid)); + blocks[burn_index] = vec![vec![event_fuzzer.random_burn()]]; + } + + serai_env::log::info!( + "dos_stall fuzz: 0..{} blocks, {} validators ({iteration}/{iterations})", + num_blocks - 1, + event_fuzzer.validators.len(), + ); + + let (shim, serai) = setup_shim_serai().await; + for (i, events) in blocks.into_iter().enumerate() { + shim.make_block(u64::try_from(i).unwrap(), events).await; + } + + let db = MemDb::new(); + let (request, _calls) = TestRequest::new(false); + let mut cosigning = Cosigning::spawn(db.clone(), serai, request, vec![]); + + let target = u64::try_from(num_blocks - 1).unwrap(); + + // Step 1: honest cosigning until we have a global session to analyze. + // Cap cosign submission at step1_target so the offline network's high-water mark + // doesn't cover blocks beyond where we'll test the stall. + let step1_target: u64 = OsRng.gen_range(3 ..= target / 3); + let mut deferred_intents: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); + run_honest_cosigning_capped( + &db, + &mut cosigning, + &event_fuzzer, + |latest| matches!(latest, Some(n) if n >= step1_target), + Some(step1_target), + &mut deferred_intents, + ) + .await; + + // Find the current global session and identify a network to take offline + let global_session_id = currently_evaluated_global_session(&db).unwrap(); + let global_session = GlobalSessions::get(&db, global_session_id).unwrap(); + let threshold = cosign_threshold(global_session.total_stake); + + // Find a network whose absence prevents reaching the threshold + let (&offline_network, &offline_stake) = global_session + .stakes + .iter() + .find(|(_, &stake)| global_session.total_stake - stake < threshold) + .unwrap(); + let online_weight: u64 = global_session + .stakes + .iter() + .filter(|(&net, _)| net != offline_network) + .map(|(_, &s)| s) + .sum(); + + let stakes_summary: Vec<_> = + global_session.stakes.iter().map(|(net, &s)| format!("{net:?}={s}")).collect(); + + serai_env::log::info!( + "dos_stall ({iteration}/{iterations}): offline={offline_network:?} \ + (stake={offline_stake}), online_weight={online_weight}, threshold={threshold}, \ + all_stakes=[{}]", + stakes_summary.join(", ") + ); + + assert!(FaultedSession::get(&db).is_none()); + let step1_latest = LatestCosignedBlockNumber::get(&db).unwrap_or(0); + + // Step 2: offline network stops signing. + // Drain all intents but only sign+submit online ones. Offline intents are kept + // in `offline_buffer` so step 3 can replay them when the network comes back. + let mut offline_buffer: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); + let mut pending_intents: Vec<(ExternalNetworkId, CosignIntent)> = Vec::new(); + for (network, intent) in deferred_intents.drain(..) { + if network == offline_network { + offline_buffer.push((network, intent)); + } else { + pending_intents.push((network, intent)); + } + } + + loop { + { + let mut db_clone = db.clone(); + let mut txn = db_clone.txn(); + for (network, intent) in drain_intents(&mut txn, &event_fuzzer) { + if network == offline_network { + offline_buffer.push((network, intent)); + } else { + pending_intents.push((network, intent)); + } + } + txn.commit(); + } + + let before = LatestCosignedBlockNumber::get(&db).unwrap_or(0); + + pending_intents = pending_intents + .drain(..) + .filter_map(|(network, intent)| { + sign_and_intake(&db, &mut cosigning, &event_fuzzer, network, intent).err() + }) + .collect(); + + tokio::time::sleep(Duration::from_millis(100)).await; + let after = LatestCosignedBlockNumber::get(&db).unwrap_or(0); + + serai_env::log::info!( + "dos_stall ({iteration}/{iterations}) loop: before={before}, after={after}, \ + pending={}, offline_buf={}", + pending_intents.len(), + offline_buffer.len(), + ); + + // Stall detected: no progress. Any stuck pending intents (e.g. FutureGlobalSession + // for a session whose declaring block needs the offline network) go to the offline + // buffer for recovery. + if after == before { + offline_buffer.extend(pending_intents.drain(..)); + break; + } + } + + let stalled_at = LatestCosignedBlockNumber::get(&db).unwrap_or(0); + assert!( + stalled_at < target, + "pipeline should be stalled before block {target}, but reached {stalled_at}" + ); + assert!(FaultedSession::get(&db).is_none(), "absence is not equivocation"); + + serai_env::log::info!( + "dos_stall ({iteration}/{iterations}): STALL verified at block {stalled_at} \ + (was {step1_latest} after step 1), online_weight={online_weight} < threshold={threshold}" + ); + + // Step 3: offline network comes back: submit buffered offline intents with retry, + // then cosign all remaining blocks via run_honest_cosigning. + // Temporal errors (FutureGlobalSession) are retried: cosigns for blocks after a + // notable block can't be accepted until the declaring block is cosigned. + while !offline_buffer.is_empty() { + offline_buffer = offline_buffer + .drain(..) + .filter_map(|(network, intent)| { + sign_and_intake(&db, &mut cosigning, &event_fuzzer, network, intent).err() + }) + .collect(); + if !offline_buffer.is_empty() { + tokio::time::sleep(Duration::from_millis(50)).await; + } + } + + let recovery_deadline = tokio::time::Instant::now() + Duration::from_secs(120); + run_honest_cosigning(&db, &mut cosigning, &event_fuzzer, |latest| { + if tokio::time::Instant::now() >= recovery_deadline { + serai_env::log::warn!( + "dos_stall ({iteration}/{iterations}): recovery timed out, latest={latest:?}" + ); + return true; + } + matches!(latest, Some(n) if n >= target) + }) + .await; + + assert!(FaultedSession::get(&db).is_none()); + let final_latest = Cosigning::::latest_cosigned_block_number(&db).unwrap().unwrap(); + if final_latest < target { + serai_env::log::warn!( + "dos_stall ({iteration}/{iterations}): recovery incomplete, \ + stalled_at={stalled_at}, final={final_latest}, target={target}, skipping" + ); + continue; + } + + serai_env::log::info!( + "dos_stall ({iteration}/{iterations}): RECOVERED, \ + stalled_at={stalled_at}, final={final_latest}" + ); + } +} diff --git a/tests/shim-rpc/src/event_fuzzer.rs b/tests/shim-rpc/src/event_fuzzer.rs index 0a3212586..17948cef1 100644 --- a/tests/shim-rpc/src/event_fuzzer.rs +++ b/tests/shim-rpc/src/event_fuzzer.rs @@ -168,7 +168,7 @@ impl EventFuzzer { } /// Generate a random BurnWithInstruction event. - fn random_burn(&mut self) -> Event { + pub fn random_burn(&mut self) -> Event { burn_with_instruction_event( random_serai_address(&mut OsRng), random_external_address(&mut OsRng), @@ -237,7 +237,46 @@ impl EventFuzzer { } } - /// Generate multiple blocks of random events. + /// Force a complete allocation of SetDecided -> SetKeys sequence for every external network, + /// guaranteeing a global session with multiple validator sets will form. + fn force_keygen(&mut self) -> [Vec>; 3] { + let external_networks: Vec = + self.networks.iter().copied().filter_map(|n| ExternalNetworkId::try_from(n).ok()).collect(); + + let mut alloc_events = Vec::new(); + let mut decided_events = Vec::new(); + let mut keys_events = Vec::new(); + + for &network in &external_networks { + let validator = *self.pick(&self.validators.clone()); + let amount = self.random_amount(); + + *self.stakes.entry((network, validator)).or_default() += amount; + alloc_events.push(allocation_event(validator, NetworkId::External(network), amount)); + + let session_num = *self.next_session.entry(network).or_insert(0); + let set = ExternalValidatorSet { network, session: Session(session_num) }; + self.pending_keys.insert(set, vec![validator]); + decided_events.push(set_decided_event( + ValidatorSet { network: NetworkId::External(network), session: Session(session_num) }, + vec![(validator, KeyShares::ONE)], + )); + + self.pending_keys.remove(&set); + *self.next_session.entry(network).or_insert(0) += 1; + let (keypair, public) = random_keypair(&mut OsRng); + self.keypairs.insert(public.0, keypair); + let external_key = random_external_key(&mut OsRng); + keys_events.push(Event::ValidatorSets(validator_sets::Event::SetKeys { + set, + key_pair: KeyPair(public, external_key), + })); + } + + [vec![alloc_events], vec![decided_events], vec![keys_events]] + } + + /// Generate `count` blocks of random events. pub fn generate_blocks(&mut self, count: usize) -> Vec>> { let mut blocks = Vec::with_capacity(count); for _ in 0 .. count { @@ -245,4 +284,15 @@ impl EventFuzzer { } blocks } + + /// Generate `count` blocks, starting with a forced keygen sequence (3 blocks) + /// to guarantee at least one global session forms, followed by random blocks. + pub fn generate_blocks_with_keygen(&mut self, count: usize) -> Vec>> { + assert!(count >= 4, "need at least 4 blocks for forced keygen + one random block"); + + let [alloc, decided, keys] = self.force_keygen(); + let mut blocks = vec![alloc, decided, keys]; + blocks.extend(self.generate_blocks(count - 3)); + blocks + } } From 72dfa890a995e419518dc08efdfad664b610523a Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 30 Mar 2026 17:15:36 -0300 Subject: [PATCH 45/71] refactor(coordinator/tributary): bringing in updates from cosign work, refactoring & improving some tests --- coordinator/cosign/src/tests/mod.rs | 11 - coordinator/tributary/Cargo.toml | 6 +- coordinator/tributary/src/db.rs | 34 +- coordinator/tributary/src/tests/db.rs | 700 ++++++++++------ coordinator/tributary/src/tests/mod.rs | 19 +- .../tributary/src/tests/transaction.rs | 791 +++++++++++------- coordinator/tributary/src/transaction.rs | 11 +- substrate/primitives/src/test_helpers.rs | 40 + tests/substrate/Cargo.toml | 1 - tests/substrate/src/lib.rs | 15 - 10 files changed, 1028 insertions(+), 600 deletions(-) diff --git a/coordinator/cosign/src/tests/mod.rs b/coordinator/cosign/src/tests/mod.rs index 633ed3483..834bfbbb3 100644 --- a/coordinator/cosign/src/tests/mod.rs +++ b/coordinator/cosign/src/tests/mod.rs @@ -91,17 +91,6 @@ pub(crate) async fn setup_shim_serai() -> (SeraiShimRpc, Arc) { (shim_serai, serai) } -pub use serai_cosign_types::tests::random_external_network_id; - -/// For whe external validator set does not alter or affect the behavior of the functions being tested -/// this can be used just as a default value any time -pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } -} -pub(crate) fn random_validator_set(rng: &mut R) -> ExternalValidatorSet { - ExternalValidatorSet { network: random_external_network_id(rng), session: Session(rng.gen()) } -} - /// Build a single-network [`GlobalSession`] from the given components. pub(crate) fn build_global_session( set: ExternalValidatorSet, diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index 0038a459f..1c7e8f0ee 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -28,7 +28,7 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = fals dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] } -serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std", "test-helpers"] } serai-db = { path = "../../common/db" } serai-task = { path = "../../common/task", version = "0.1" } @@ -40,7 +40,7 @@ serai-coordinator-substrate = { path = "../substrate" } messages = { package = "serai-processor-messages", path = "../../processor/messages" } -serai-log = { path = "../../common/log", version = "0.1.0" } +serai-env = { path = "../../common/env", version = "0.1.0" } [dev-dependencies] env_logger = { version = "0.10", default-features = false, features = ["humantime"] } @@ -49,7 +49,7 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] } proptest = "1" tributary-sdk = { path = "../tributary-sdk", features = ["tests"] } tokio = { version = "1", default-features = false, features = ["rt", "time", "macros", "rt-multi-thread"] } -serai-test-task = { path = "../../tests/task" } +serai-task = { path = "../../common/task", features = ["test-helpers"] } serai-substrate-tests = { path = "../../tests/substrate" } [features] diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 6dc2b36ea..1b4896870 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -51,8 +51,8 @@ pub(crate) enum Participating { Everyone, } -pub(crate) fn required_participation(n: u16) -> Option { - n.checked_mul(2)?.checked_div(3)?.checked_add(1) +pub(crate) fn required_participation(n: u16) -> Result { + Ok(n.checked_mul(2).ok_or("total_weight * 2 overflows u16")? / 3 + 1) } impl Topic { @@ -278,6 +278,16 @@ db_channel!( } ); +// 5 minutes +#[cfg(not(feature = "longer-reattempts"))] +const BASE_REATTEMPT_DELAY: u32 = + (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + +// 10 minutes, intended for latent environments like the GitHub CI +#[cfg(feature = "longer-reattempts")] +const BASE_REATTEMPT_DELAY: u32 = + (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + pub(crate) struct TributaryDb; impl TributaryDb { pub(crate) fn last_handled_tributary_block( @@ -403,7 +413,7 @@ impl TributaryDb { validator: SeraiAddress, #[cfg_attr(coverage, allow(unused_variables))] reason: &str, ) { - serai_log::warn!("{validator} fatally slashed: {reason}"); + serai_env::warn!("{validator} fatally slashed: {reason}"); SlashPoints::set(txn, set, validator, &u32::MAX); } @@ -458,8 +468,12 @@ impl TributaryDb { } } - let Some(required_participation) = required_participation(total_weight) else { - return DataSet::None; + let required_participation = match required_participation(total_weight) { + Ok(val) => val, + Err(e) => { + serai_env::error!("required_participation({total_weight}) failed: {e}"); + return DataSet::None; + } }; // The complete lack of validation on the data by these NOPs opens the potential for spam here @@ -485,16 +499,6 @@ impl TributaryDb { // Queue this for re-attempt after enough time passes let reattempt_topic = topic.reattempt_topic(); if let Some((attempt, reattempt_topic)) = reattempt_topic { - // 5 minutes - #[cfg(not(feature = "longer-reattempts"))] - const BASE_REATTEMPT_DELAY: u32 = - (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - - // 10 minutes, intended for latent environments like the GitHub CI - #[cfg(feature = "longer-reattempts")] - const BASE_REATTEMPT_DELAY: u32 = - (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - // Linearly scale the time for the protocol with the attempt number let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY); diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 2e2e40334..2fafabd4c 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,29 +1,23 @@ use rand::{RngCore, rngs::OsRng}; -use serai_primitives::{address::SeraiAddress, validator_sets::ExternalValidatorSet}; - use messages::sign::VariantSignId; use serai_db::{Db, DbTxn, MemDb}; -use serai_substrate_tests::random_serai_address; +use serai_primitives::{ + address::SeraiAddress, + validator_sets::ExternalValidatorSet, + test_helpers::{ + random_bytes_32, random_bytes_64, random_serai_address, random_block_number, + default_test_validator_set, random_validator_set, random_vec_u8, + }, +}; use crate::{ db::*, - tests::{default_test_validator_set, random_transaction_id, random_block_number}, - transaction::SigningProtocolRound, + tests::random_transaction_id, + transaction::{GenericDataset, Preprocess, Share, SigningProtocolRound}, }; -fn random_data_u32() -> [u8; 32] { - let mut data = [0u8; 32]; - OsRng.fill_bytes(&mut data); - data -} -fn random_data_u64() -> [u8; 64] { - let mut data = [0u8; 64]; - OsRng.fill_bytes(&mut data); - data -} - fn all_topics() -> Vec { vec![ Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, @@ -39,44 +33,69 @@ fn all_topics() -> Vec { ] } -fn all_topics_with_u32_max_attempts() -> Vec { +/// A random Share topic that has a preceding Preprocess topic. +fn random_share_topic_with_preceding() -> Topic { + if OsRng.next_u64() % 2 == 0 { + Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share } + } else { + Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share } + } +} + +/// A random topic with `attempt = u32::MAX` and `round = Preprocess` that has +/// `reattempt_topic()` and `next_attempt_topic()` returning `Some`. +fn random_reattemptable_topic_at_max_attempt() -> Topic { + if OsRng.next_u64() % 2 == 0 { + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess } + } else { + Topic::Sign { + id: random_transaction_id(), + attempt: u32::MAX, + round: SigningProtocolRound::Preprocess, + } + } +} + +fn all_topics_at_max_attempts() -> Vec { vec![ Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, - Topic::DkgConfirmation { attempt: std::u32::MAX, round: SigningProtocolRound::Preprocess }, - Topic::DkgConfirmation { attempt: std::u32::MAX, round: SigningProtocolRound::Share }, + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share }, Topic::SlashReport, Topic::Sign { id: random_transaction_id(), - attempt: std::u32::MAX, + attempt: u32::MAX, round: SigningProtocolRound::Preprocess, }, Topic::Sign { id: random_transaction_id(), - attempt: std::u32::MAX, + attempt: u32::MAX, round: SigningProtocolRound::Share, }, ] } -type NoEachFn = fn(usize, &DataSet<[u8; 32]>); +type NoEachFn = fn(usize, &DataSet); /// Cross threshold by accumulating from all validators, returning the final result. -fn accumulate_to_threshold( +fn accumulate_to_threshold( txn: &mut impl DbTxn, set: ExternalValidatorSet, validators: &[SeraiAddress], total_weight: u16, block_number: u64, topic: Topic, - on_each: Option, -) -> DataSet<[u8; 32]> + make_data: F2, + mut on_each: Option, +) -> DataSet where - F1: FnMut(usize, &DataSet<[u8; 32]>), + F1: FnMut(usize, &DataSet), + F2: Fn(usize) -> D, { - let mut on_each = on_each; let mut result = DataSet::None; for (i, v) in validators.iter().enumerate() { - result = TributaryDb::accumulate::<[u8; 32]>( + let data = make_data(i); + result = TributaryDb::accumulate::( txn, set, validators, @@ -85,7 +104,7 @@ where topic, *v, 1, - &[i as u8; 32], + &data, ); if let Some(ref mut f) = on_each { f(i, &result); @@ -95,6 +114,20 @@ where result } +#[test] +fn required_participation() { + use crate::db::required_participation; + + assert_eq!(required_participation(0), Ok(1)); + // Random value within non-overflow range + let random_n = (OsRng.next_u32() as u16) % (u16::MAX / 2); + assert_eq!(required_participation(random_n), Ok(random_n * 2 / 3 + 1)); + + assert!(required_participation(u16::MAX / 2).is_ok()); + assert!(required_participation(u16::MAX / 2 + 1).is_err()); + assert!(required_participation(u16::MAX).is_err()); +} + mod topic { use messages::sign::SignId; use super::*; @@ -119,7 +152,7 @@ mod topic { } } - for topic in all_topics_with_u32_max_attempts() { + for topic in all_topics_at_max_attempts() { match topic { Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), Topic::DkgConfirmation { .. } => assert_eq!( @@ -173,7 +206,7 @@ mod topic { } } - for topic in all_topics_with_u32_max_attempts() { + for topic in all_topics_at_max_attempts() { match topic { Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), Topic::DkgConfirmation { round, .. } => match round { @@ -200,7 +233,7 @@ mod topic { #[test] fn sign_id() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); for topic in all_topics() { match topic { Topic::Sign { id, attempt, round: _ } => { @@ -213,7 +246,7 @@ mod topic { #[test] fn dkg_confirmation_sign_id() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); for topic in all_topics() { match topic { Topic::DkgConfirmation { attempt, round: _ } => assert_eq!( @@ -233,6 +266,38 @@ mod topic { } } + #[test] + fn preceding_topic() { + for topic in all_topics() { + match topic { + Topic::RemoveParticipant { .. } => assert_eq!(topic.preceding_topic(), None), + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => assert_eq!(topic.preceding_topic(), None), + SigningProtocolRound::Share => assert_eq!( + topic.preceding_topic(), + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) + ), + }, + Topic::SlashReport => assert_eq!(topic.preceding_topic(), None), + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => assert_eq!(topic.preceding_topic(), None), + SigningProtocolRound::Share => assert_eq!( + topic.preceding_topic(), + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) + ), + }, + } + + // preceding and succeeding should be inverses + if let Some(preceding) = topic.preceding_topic() { + assert_eq!(preceding.succeeding_topic(), Some(topic)); + } + if let Some(succeeding) = topic.succeeding_topic() { + assert_eq!(succeeding.preceding_topic(), Some(topic)); + } + } + } + #[test] fn succeeding_topic() { for topic in all_topics() { @@ -290,17 +355,17 @@ mod topic { } mod tributary_db { - use serai_substrate_tests::random_block_hash; + use serai_primitives::test_helpers::random_block_hash; use super::*; #[test] - fn start_cosigning() { + fn start_and_finish_cosigning() { let mut db = MemDb::new(); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let block_hash1 = random_block_hash(&mut OsRng); - let block_number1 = random_block_number(); + let block_number1 = random_block_number(&mut OsRng); - let topic = Topic::Sign { + let expected_topic = Topic::Sign { id: VariantSignId::Cosign(block_number1), attempt: 0, round: SigningProtocolRound::Preprocess, @@ -311,7 +376,7 @@ mod tributary_db { let mut txn = db.txn(); TributaryDb::start_cosigning(&mut txn, set, block_hash1, block_number1); assert!(TributaryDb::try_recv_topic_requiring_recognition(&mut txn, set).is_some()); - assert!(TributaryDb::recognized(&txn, set, topic)); + assert!(TributaryDb::recognized(&txn, set, expected_topic)); txn.commit(); } @@ -322,14 +387,14 @@ mod tributary_db { let retry = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = random_block_number(); + let block_number2 = random_block_number(&mut OsRng); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); })); assert!(retry.is_err()); // Previous topic still recognized - assert!(TributaryDb::recognized(&txn, set, topic)); + assert!(TributaryDb::recognized(&txn, set, expected_topic)); txn.commit(); } @@ -341,15 +406,7 @@ mod tributary_db { assert_eq!(ActivelyCosigning::get(&mut txn, set), None); // Previous topic remains recognized - assert!(TributaryDb::recognized( - &txn, - set, - Topic::Sign { - id: VariantSignId::Cosign(block_number1), - attempt: 0, - round: SigningProtocolRound::Preprocess, - } - )); + assert!(TributaryDb::recognized(&txn, set, expected_topic)); txn.commit(); } @@ -358,7 +415,7 @@ mod tributary_db { { let mut txn = db.txn(); let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = random_block_number(); + let block_number2 = random_block_number(&mut OsRng); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash2)); @@ -377,15 +434,7 @@ mod tributary_db { } )); // Previous topic also remains recognized - assert!(TributaryDb::recognized( - &txn, - set, - Topic::Sign { - id: VariantSignId::Cosign(block_number1), - attempt: 0, - round: SigningProtocolRound::Preprocess, - } - )); + assert!(TributaryDb::recognized(&txn, set, expected_topic)); txn.commit(); } @@ -394,14 +443,14 @@ mod tributary_db { #[test] fn start_of_block() { let _ = env_logger::try_init(); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let reattemptable_topics: Vec = all_topics() .into_iter() .filter_map(|t| t.reattempt_topic().map(|(_, reattempt_topic)| reattempt_topic)) .collect(); - serai_log::log::info!( + serai_env::info!( "start_of_block fuzz: reattemptable_topics={reattemptable_topics:?}, \ all_topics count={}", all_topics().len() @@ -411,21 +460,21 @@ mod tributary_db { for topic in all_topics() { // Fresh DB per topic so recognized state doesn't leak between iterations let mut db = MemDb::new(); - let block_number = random_block_number(); let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); // Randomly select which reattempt topics are queued for this block let reattempts: Vec = reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); - serai_log::log::info!( + serai_env::trace!( "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ reattempts={reattempts:?}" ); if !reattempts.is_empty() { Reattempt::set(&mut txn, set, block_number, &reattempts); - serai_log::log::info!("set {} reattempt(s) for block {block_number}", reattempts.len()); + serai_env::trace!("set {} reattempt(s) for block {block_number}", reattempts.len()); } TributaryDb::start_of_block(&mut txn, set, block_number); @@ -435,10 +484,10 @@ mod tributary_db { assert!(TributaryDb::recognized(&txn, set, *reattempt)); if reattempt.sign_id(set).is_some() { assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - serai_log::log::info!("verified ProcessorMessage for {reattempt:?}"); + serai_env::trace!("verified ProcessorMessage for {reattempt:?}"); } else if reattempt.dkg_confirmation_sign_id(set).is_some() { assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); - serai_log::log::info!("verified DkgConfirmationMessage for {reattempt:?}"); + serai_env::trace!("verified DkgConfirmationMessage for {reattempt:?}"); } } @@ -446,7 +495,7 @@ mod tributary_db { if reattempts.is_empty() { if let Some((_, reattempt_topic)) = topic.reattempt_topic() { assert_eq!(TributaryDb::recognized(&txn, set, reattempt_topic), false); - serai_log::log::info!("verified {reattempt_topic:?} not recognized (no reattempts)"); + serai_env::trace!("verified {reattempt_topic:?} not recognized (no reattempts)"); } } @@ -458,13 +507,13 @@ mod tributary_db { } } - serai_log::log::info!("start_of_block fuzz: completed 100 iterations"); + serai_env::log::info!("start_of_block fuzz: completed 100 iterations"); } #[test] fn fatal_slash() { let mut db = MemDb::new(); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let validator = random_serai_address(&mut OsRng); { @@ -474,64 +523,61 @@ mod tributary_db { } assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); - assert_eq!(SlashPoints::get(&db, set, validator), Some(std::u32::MAX)); + assert_eq!(SlashPoints::get(&db, set, validator), Some(u32::MAX)); } mod accumulate { use super::*; - mod accumulate_preceding_topic { - use super::*; + /// Common test setup: random validator set, 3 validators of weight 1, total_weight = 3. + fn default_accumulate_setup( + ) -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16) { + let set = random_validator_set(&mut OsRng); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let validator = validators[0]; + let total_weight = 3; + let validator_weight = 1; + (set, validator, validators, total_weight, validator_weight) + } - /// Set up a DkgConfirmation Share topic (which has a Preprocess preceding topic) - /// with 3 validators of weight 1 each so `required_participation = 3`. - fn setup() -> (ExternalValidatorSet, Vec, u16, u16, Topic, Topic, SeraiAddress) - { - let set = default_test_validator_set(); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); - let total_weight = 3; + mod accumulate_preceding_topic { - let share_topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }; - let preprocess_topic = - Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; - assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + use super::*; - let validator = validators[0]; - let validator_weight = 1; - (set, validators, validator_weight, total_weight, share_topic, preprocess_topic, validator) + /// Set up a random Share topic (which requires participation in a preceding + /// Preprocess topic) with 3 validators of weight 1 each. + fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); + let share_topic = random_share_topic_with_preceding(); + (set, validator, validators, total_weight, validator_weight, share_topic) } #[test] fn no_preceding_data_slashes_validator() { - let ( - set, - validators, - validator_weight, - total_weight, - share_topic, - _preprocess_topic, - validator, - ) = setup(); + let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); let mut db = MemDb::new(); let mut txn = db.txn(); - // Recognize the share topic so the recognition check doesn't slash - TributaryDb::recognize_topic(&mut txn, set, share_topic); + // Recognize the share topic so we reach the preceding-topic check + if share_topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, share_topic); + } - // Do NOT store any preceding Preprocess data + // Do not store any preceding Preprocess data // Validator should be slashed with reason: // "participated in topic without participating in prior" - let result = TributaryDb::accumulate::<[u8; 32]>( + let result = TributaryDb::accumulate::( &mut txn, set, &validators, total_weight, - random_block_number(), + random_block_number(&mut OsRng), share_topic, validator, validator_weight, - &random_data_u32(), + &random_bytes_32(&mut OsRng), ); txn.commit(); @@ -540,138 +586,121 @@ mod tributary_db { } #[test] - fn different_type_stored_in_preceding_topic_passes_existence_check() { - let ( - set, - validators, - validator_weight, - total_weight, - share_topic, - preprocess_topic, - validator, - ) = setup(); - let mut db = MemDb::new(); - let mut txn = db.txn(); - - // Recognize the share topic so the recognition check doesn't slash - TributaryDb::recognize_topic(&mut txn, set, share_topic); + fn preceding_topic_passes_existence_check() { + // Different types: DkgConfirmation stores Preprocess, accumulates Share + { + let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); + let mut db = MemDb::new(); + let mut txn = db.txn(); - // Store preceding preprocess data ([u8; 64]) - Accumulated::<[u8; 64]>::set( - &mut txn, - set, - preprocess_topic, - validator, - &random_data_u64(), - ); + // Recognize the share topic so we reach the preceding-topic check + if share_topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, share_topic); + } - // Accumulate a share ([u8; 32]) - // The preceding check should find the key despite the type mismatch and NOT slash. - let result = TributaryDb::accumulate::<[u8; 32]>( - &mut txn, - set, - &validators, - total_weight, - random_block_number(), - share_topic, - validator, - validator_weight, - &random_data_u32(), - ); - txn.commit(); + // Store preceding preprocess data (Preprocess) + Accumulated::::set( + &mut txn, + set, + share_topic.preceding_topic().unwrap(), + validator, + &random_bytes_64(&mut OsRng), + ); - // Below threshold (1 of 3) so result is None but data is stored - assert!(matches!(result, DataSet::None)); + // Accumulate a share (Share) + // The preceding check should find the key despite the type mismatch and NOT slash. + let result = TributaryDb::accumulate::( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + share_topic, + validator, + validator_weight, + &random_bytes_32(&mut OsRng), + ); + txn.commit(); - assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); - assert!(Accumulated::<[u8; 32]>::get(&db, set, share_topic, validator).is_some()); - } + assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); - #[test] - fn same_type_stored_in_preceding_topic_passes_existence_check() { - let ( - set, - validators, - validator_weight, - total_weight, - _share_topic, - _preprocess_topic, - validator, - ) = setup(); + // Below threshold (1 of 3) so result is None but data is stored + assert!(matches!(result, DataSet::None)); + // Confirm data is stored + assert!(Accumulated::::get(&db, set, share_topic, validator).is_some()); + } - // Sign Share has a Sign Preprocess preceding topic, both use Vec> as D - let txid = random_transaction_id(); - let share_topic = Topic::Sign { id: txid, attempt: 0, round: SigningProtocolRound::Share }; - let preprocess_topic = - Topic::Sign { id: txid, attempt: 0, round: SigningProtocolRound::Preprocess }; - assert_eq!(share_topic.preceding_topic(), Some(preprocess_topic)); + // Same types: Sign stores GenericDataset for both preprocess and share + { + let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); - let mut db = MemDb::new(); - let mut txn = db.txn(); + let mut db = MemDb::new(); + let mut txn = db.txn(); - // Recognize both topics - TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); - TributaryDb::recognize_topic(&mut txn, set, share_topic); + let preprocess_topic = share_topic.preceding_topic().unwrap(); - // Store preceding data with the same type as share will use - let preprocess_data: Vec> = vec![vec![1, 2, 3]]; - Accumulated::set(&mut txn, set, preprocess_topic, validator, &preprocess_data); + // Recognize and accumulate the preprocess to threshold + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + preprocess_topic, + |_| vec![random_vec_u8(&mut OsRng)], + None::)>, + ); - let share_data: Vec> = vec![vec![4, 5, 6]]; - let result = TributaryDb::accumulate::>>( - &mut txn, - set, - &validators, - total_weight, - random_block_number(), - share_topic, - validator, - validator_weight, - &share_data, - ); - txn.commit(); + // Accumulate a share with the same GenericDataset type + let share_data: GenericDataset = vec![random_vec_u8(&mut OsRng)]; + let result = TributaryDb::accumulate::( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + share_topic, + validator, + validator_weight, + &share_data, + ); + txn.commit(); - assert!(matches!(result, DataSet::None)); - assert_eq!( - Accumulated::>>::get(&db, set, share_topic, validator), - Some(share_data) - ); - assert!(!TributaryDb::is_fatally_slashed(&db, set, validator)); + assert_eq!( + TributaryDb::is_fatally_slashed(&db, set, validator), + false, + "preceding key exists (same type) so validator should not be slashed" + ); + assert!(matches!(result, DataSet::None), "below threshold (1 of 3)"); + assert_eq!( + Accumulated::::get(&db, set, share_topic, validator), + Some(share_data) + ); + } } } mod accumulate_next_attempt_topic { use super::*; - /// Set up a DkgConfirmation Preprocess topic with `attempt = std::u32::MAX` and - /// with 3 validators of weight 1 each so `required_participation = 3`. - fn setup() -> (ExternalValidatorSet, Vec, u16, u16, Topic) { - let set = default_test_validator_set(); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); - let total_weight = 3; - let validator_weight = 1; - - // what topic is being tested does not alter the functions being tested - // we are only testing attempt amounts here - let topic = Topic::DkgConfirmation { - attempt: std::u32::MAX, - round: SigningProtocolRound::Preprocess, - }; - - (set, validators, validator_weight, total_weight, topic) + /// Set up a random reattemptable topic with `attempt = u32::MAX`. + fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); + let topic = random_reattemptable_topic_at_max_attempt(); + (set, validator, validators, total_weight, validator_weight, topic) } #[test] fn accumulates_normally_despite_overflow() { - let (set, validators, _validator_weight, total_weight, topic) = setup(); + let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); let mut db = MemDb::new(); - let block_number = random_block_number(); + let block_number = random_block_number(&mut OsRng); { let mut txn = db.txn(); - // DkgConfirmation with attempt = std::u32::MAX requires recognition + // DkgConfirmation with attempt = u32::MAX requires recognition TributaryDb::recognize_topic(&mut txn, set, topic); // Accumulate from all 3 validators to cross threshold @@ -682,7 +711,8 @@ mod tributary_db { total_weight, block_number, topic, - Some(|i: usize, result: &DataSet<[u8; 32]>| { + |i| [i as u8; 32], + Some(|i: usize, result: &DataSet| { if i < 2 { assert!(matches!(result, DataSet::None)); } else { @@ -696,7 +726,7 @@ mod tributary_db { ); assert!(matches!(result, DataSet::Participating(_))); - // reattempt_topic() wraps attempt std::u32::MAX to 0, so blocks_till_reattempt = 0. + // reattempt_topic() wraps attempt u32::MAX to 0, so blocks_till_reattempt = 0. // A reattempt is queued at block_number itself. assert!(Reattempt::get(&txn, set, block_number).is_some()); // But not at any subsequent block @@ -709,17 +739,17 @@ mod tributary_db { for (i, v) in validators.iter().enumerate() { assert!(!TributaryDb::is_fatally_slashed(&db, set, *v)); - assert_eq!(Accumulated::<[u8; 32]>::get(&db, set, topic, *v), Some([i as u8; 32])); + assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); } assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(3)); } - /// When attempt 0 has already accumulated data, accumulating for attempt std::u32::MAX should be - /// NOP'd because `next_attempt_topic(std::u32::MAX)` wraps to attempt 0, which already exists. + /// When attempt 0 has already accumulated data, accumulating for attempt u32::MAX should be + /// NOP'd because `next_attempt_topic(u32::MAX)` wraps to attempt 0, which already exists. #[test] fn attempt_max_nopd_when_attempt_zero_exists() { - let (set, validators, validator_weight, total_weight, topic_max) = setup(); + let (set, _validator, validators, total_weight, validator_weight, topic_max) = setup(); let topic_0 = topic_max.next_attempt_topic().unwrap(); assert_eq!( @@ -733,16 +763,16 @@ mod tributary_db { { let mut txn = db.txn(); TributaryDb::recognize_topic(&mut txn, set, topic_0); - let result = TributaryDb::accumulate::<[u8; 32]>( + let result = TributaryDb::accumulate::( &mut txn, set, &validators, total_weight, - random_block_number(), + random_block_number(&mut OsRng), topic_0, validators[0], validator_weight, - &random_data_u32(), + &random_bytes_32(&mut OsRng), ); assert!(matches!(result, DataSet::None)); txn.commit(); @@ -751,53 +781,53 @@ mod tributary_db { // Attempt 0 has accumulated weight assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); - // Now try to accumulate for attempt std::u32::MAX + // Now try to accumulate for attempt u32::MAX { let mut txn = db.txn(); TributaryDb::recognize_topic(&mut txn, set, topic_max); - let result = TributaryDb::accumulate::<[u8; 32]>( + let result = TributaryDb::accumulate::( &mut txn, set, &validators, total_weight, - random_block_number(), + random_block_number(&mut OsRng), topic_max, validators[1], validator_weight, - &random_data_u32(), + &random_bytes_32(&mut OsRng), ); - // NOP'd: next_attempt_topic(std::u32::MAX) = attempt 0, which already has weight + // NOP'd: next_attempt_topic(u32::MAX) = attempt 0, which already has weight assert!(matches!(result, DataSet::None)); txn.commit(); } - // Attempt std::u32::MAX should have no accumulated data (it was NOP'd) - assert!(Accumulated::<[u8; 32]>::get(&db, set, topic_max, validators[1]).is_none()); - // Weight for std::u32::MAX stays at initial recognized value (0) + // Attempt u32::MAX should have no accumulated data (it was NOP'd) + assert!(Accumulated::::get(&db, set, topic_max, validators[1]).is_none()); + // Weight for u32::MAX stays at initial recognized value (0) assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(0)); } #[test] fn attempt_max_proceeds() { - let (set, validators, validator_weight, total_weight, topic_max) = setup(); + let (set, _validator, validators, total_weight, validator_weight, topic_max) = setup(); let topic_0 = topic_max.next_attempt_topic().unwrap(); let mut db = MemDb::new(); - // First: accumulate for attempt std::u32::MAX (below threshold) + // First: accumulate for attempt u32::MAX (below threshold) { let mut txn = db.txn(); TributaryDb::recognize_topic(&mut txn, set, topic_max); - let result = TributaryDb::accumulate::<[u8; 32]>( + let result = TributaryDb::accumulate::( &mut txn, set, &validators, total_weight, - random_block_number(), + random_block_number(&mut OsRng), topic_max, validators[0], validator_weight, - &random_data_u32(), + &random_bytes_32(&mut OsRng), ); assert!(matches!(result, DataSet::None)); txn.commit(); @@ -805,18 +835,18 @@ mod tributary_db { assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(validator_weight)); - let data = random_data_u32(); + let data = random_bytes_32(&mut OsRng); // Now accumulate for attempt 0 { let mut txn = db.txn(); TributaryDb::recognize_topic(&mut txn, set, topic_0); - let result = TributaryDb::accumulate::<[u8; 32]>( + let result = TributaryDb::accumulate::( &mut txn, set, &validators, total_weight, - random_block_number(), + random_block_number(&mut OsRng), topic_0, validators[1], validator_weight, @@ -828,11 +858,207 @@ mod tributary_db { } // Attempt 0 accumulated successfully - assert_eq!(Accumulated::<[u8; 32]>::get(&db, set, topic_0, validators[1]), Some(data)); + assert_eq!(Accumulated::::get(&db, set, topic_0, validators[1]), Some(data)); assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); } } + mod accumulate_reattempt_topic { + use super::*; + + /// Set up a random reattemptable topic with `attempt = u32::MAX`. + fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); + let topic = random_reattemptable_topic_at_max_attempt(); + (set, validator, validators, total_weight, validator_weight, topic) + } + + #[test] + fn reattempt_wraps_to_zero_on_overflow() { + let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); + let mut db = MemDb::new(); + let block_number = 1_000_000u64; + + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + + let result = accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| [i as u8; 32], + None::, + ); + assert!(matches!(result, DataSet::Participating(_))); + txn.commit(); + } + + // Overflow wraps attempt to 0, so blocks_till_reattempt = 0 * BASE_DELAY = 0. + // Reattempt is queued at block_number itself. + assert!(Reattempt::get(&db, set, block_number).is_some()); + // But not at any subsequent block + assert!(Reattempt::get(&db, set, block_number + 1).is_none()); + } + + #[test] + fn data_preserved_when_overflow_wraps() { + let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + |i| [i as u8; 32], + None::, + ); + txn.commit(); + } + + // reattempt_topic() wraps to attempt 0, so data is preserved for the reattempt + for (i, v) in validators.iter().enumerate() { + assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); + } + } + + #[test] + fn data_preserved_with_normal_attempt() { + let set = default_test_validator_set(); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let total_weight = 3; + + // attempt = 0 so reattempt_topic() returns Some + let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + assert!(topic.reattempt_topic().is_some()); + + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + // attempt 0 Preprocess doesn't require recognition + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + |i| [i as u8; 32], + None::, + ); + txn.commit(); + } + + // reattempt_topic() is Some, so data is preserved for the reattempt + for (i, v) in validators.iter().enumerate() { + assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32]),); + } + } + + #[test] + fn succeeding_topic_recognized_with_overflow_wrap() { + let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); + let mut db = MemDb::new(); + + let succeeding = topic.succeeding_topic().unwrap(); + assert_eq!( + succeeding, + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share } + ); + + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + |i| [i as u8; 32], + None::, + ); + txn.commit(); + } + + // The succeeding topic is recognized + assert_eq!(AccumulatedWeight::get(&db, set, succeeding), Some(0)); + } + + #[test] + fn sign_topic_reattempt_wraps_on_overflow() { + let set = default_test_validator_set(); + let validators: Vec = + (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let total_weight = 3; + let block_number = 500_000u64; + + let topic = Topic::Sign { + id: VariantSignId::Cosign(42), + attempt: u32::MAX, + round: SigningProtocolRound::Preprocess, + }; + // Overflow wraps to attempt 0 + assert_eq!( + topic.reattempt_topic(), + Some(( + 0, + Topic::Sign { + id: VariantSignId::Cosign(42), + attempt: 0, + round: SigningProtocolRound::Preprocess, + } + )) + ); + + let mut db = MemDb::new(); + + { + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| [i as u8; 32], + None::, + ); + txn.commit(); + } + + // Reattempt queued at block_number (delay = 0 for attempt 0) + assert!(Reattempt::get(&db, set, block_number).is_some()); + for offset in 1 ..= 2000 { + assert!(Reattempt::get(&db, set, block_number + offset).is_none()); + } + + // Data preserved for reattempt + for (i, v) in validators.iter().enumerate() { + assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); + } + + // Succeeding topic (Share) still recognized + let succeeding = topic.succeeding_topic().unwrap(); + assert_eq!(AccumulatedWeight::get(&db, set, succeeding), Some(0)); + } + } + mod fuzz { use proptest::prelude::*; use super::*; @@ -858,7 +1084,7 @@ mod tributary_db { validator_in_list: bool, result: &DataSet>, ) { - let required = required_participation(total_weight); + let required = crate::db::required_participation(total_weight); let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); let post_weight = AccumulatedWeight::get(db, set, topic); @@ -885,7 +1111,7 @@ mod tributary_db { } // Branch 3: required_participation overflows. - let Some(required) = required else { + let Ok(required) = required else { assert!(matches!(result, DataSet::None)); assert_eq!( post_weight, pre_weight, diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 4d4d0f019..3f30ec39d 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -8,8 +8,7 @@ use rand_core::OsRng; use serai_primitives::{ address::SeraiAddress, - network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, + test_helpers::{random_bytes_32, default_test_validator_set}, }; use tributary_sdk::P2p; @@ -18,6 +17,8 @@ use zeroize::Zeroizing; pub mod transaction; pub mod db; pub mod scan_block; +pub mod scan_tributary; +pub mod tributary; #[derive(Clone)] struct MockP2p; @@ -27,12 +28,6 @@ impl P2p for MockP2p { } } -pub(crate) fn default_test_validator_set() -> ExternalValidatorSet { - // The external validator set does not alter or affect the behavior of the functions being tested - // this can be used just as a default value any time - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } -} - pub(crate) fn random_key( rng: &mut R, ) -> Zeroizing<::F> { @@ -51,11 +46,5 @@ pub(crate) fn random_serai_address_and_key( } pub(crate) fn random_transaction_id() -> VariantSignId { - let mut txid = [0u8; 32]; - OsRng.fill_bytes(&mut txid); - VariantSignId::Transaction(txid) -} - -pub(crate) fn random_block_number() -> u64 { - OsRng.next_u64() + VariantSignId::Transaction(random_bytes_32(&mut OsRng)) } diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 0b38b7eb2..fd959f570 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -1,31 +1,33 @@ use core::ops::Deref as _; use rand::{CryptoRng, RngCore, rngs::OsRng}; -use schnorr::SchnorrSignature; use ciphersuite::{group::Group as _, *}; use dalek_ff_group::Ristretto; use serai_primitives::validator_sets::KeyShares; -use serai_substrate_tests::{random_serai_address, random_block_hash}; +use serai_primitives::test_helpers::{ + random_bytes_32, random_bytes_64, random_vec_u8, random_serai_address, random_block_hash, + random_genesis, +}; use messages::sign::VariantSignId; use tributary_sdk::{ ReadWrite, - tests::new_genesis, transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, }; use crate::{db::Topic, tests::random_key}; use crate::transaction::{SigningProtocolRound, Signed, Transaction}; +/// Create a random serai-coordinator-tributary `Signed` from a tributary-sdk `Signed` fn random_signed(rng: &mut R) -> Signed { let signed = tributary_sdk::tests::random_signed(&mut *rng); Signed { signer: signed.signer, signature: signed.signature } } -/// One of each signed transaction kind with default signatures. +/// One of each signed transaction kind with random values. fn all_signed_transactions() -> Vec { vec![ Transaction::RemoveParticipant { @@ -33,34 +35,37 @@ fn all_signed_transactions() -> Vec { signed: random_signed(&mut OsRng), }, Transaction::DkgParticipation { - participation: vec![1, 2, 3], + participation: random_vec_u8(&mut OsRng), signed: random_signed(&mut OsRng), }, Transaction::DkgConfirmationPreprocess { attempt: 0, - preprocess: [1; 64], + preprocess: random_bytes_64(&mut OsRng), signed: random_signed(&mut OsRng), }, Transaction::DkgConfirmationShare { attempt: 0, - share: [1; 32], + share: random_bytes_32(&mut OsRng), signed: random_signed(&mut OsRng), }, Transaction::Sign { - id: VariantSignId::Transaction([0; 32]), + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), attempt: 0, round: SigningProtocolRound::Preprocess, - data: vec![vec![1, 2, 3]], + data: vec![random_vec_u8(&mut OsRng)], signed: random_signed(&mut OsRng), }, Transaction::Sign { - id: VariantSignId::Transaction([0; 32]), + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), attempt: 0, round: SigningProtocolRound::Share, - data: vec![vec![1, 2, 3]], + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed: random_signed(&mut OsRng), + }, + Transaction::SlashReport { + slash_points: (0 .. 3).map(|_| OsRng.next_u32()).collect(), signed: random_signed(&mut OsRng), }, - Transaction::SlashReport { slash_points: vec![0, 1, 2], signed: random_signed(&mut OsRng) }, ] } @@ -87,57 +92,64 @@ fn all_signing_protocol_rounds() -> Vec { #[test] fn signing_protocol_round_nonce() { - assert_eq!(SigningProtocolRound::Preprocess.nonce(), 0); - assert_eq!(SigningProtocolRound::Share.nonce(), 1); + for round in all_signing_protocol_rounds() { + let expected_nonce = match round { + SigningProtocolRound::Preprocess => 0, + SigningProtocolRound::Share => 1, + }; + assert_eq!(round.nonce(), expected_nonce, "Wrong nonce for {round:?}"); + } } mod signed { use super::*; + use ciphersuite::group::{GroupEncoding, ff::PrimeField}; #[test] - fn default_signer_is_identity() { - let default_signed = Signed::default(); - let identity = ::G::identity(); - assert_eq!(default_signed.signer(), identity); - assert_eq!( - default_signed.signature, - SchnorrSignature { R: identity, s: ::F::ZERO } - ); - } - - #[test] - fn to_tributary_signed_matches_signed() { - let signed = random_signed(&mut OsRng); - for round in all_signing_protocol_rounds() { - let tributary_signed = signed.clone().to_tributary_signed(round); - assert_eq!(signed.signer(), tributary_signed.signer); - assert_eq!(signed.signature, tributary_signed.signature); - assert_eq!(tributary_signed.nonce, round.nonce()); - } - } - - #[test] - fn signed_borsh_serialize_and_deserialize() { + fn borsh_serialize_and_deserialize() { use std::io::{self, Read, Write}; use borsh::{BorshSerialize, BorshDeserialize}; - // Should work + // Check the format of `Signed` + { + let signed = random_signed(&mut OsRng); + let serialized = borsh::to_vec(&signed).unwrap(); + + // `signer || R || s` + let mut expected: Vec = Vec::new(); + expected.extend(signed.signer.to_bytes().as_ref()); + expected.extend(signed.signature.R.to_bytes().as_ref()); + expected.extend(signed.signature.s.to_repr().as_ref()); + assert_eq!(serialized, expected, "serialized format should be `signer || R || s`"); + + let deserialized: Signed = borsh::from_slice(&serialized).unwrap(); + assert_eq!(signed, deserialized, "round-trip should preserve the original Signed"); + } + + // Should serialize { let signed = random_signed(&mut OsRng); let serialized = borsh::to_vec(&signed).unwrap(); let mut manual_buf = Vec::new(); signed.serialize(&mut manual_buf).unwrap(); - assert_eq!(serialized, manual_buf); + assert_eq!( + serialized, manual_buf, + "borsh::to_vec and manual serialize should produce identical bytes" + ); let deserialized: Signed = borsh::from_slice(&serialized).unwrap(); let mut cursor = std::io::Cursor::new(&serialized); - assert_eq!(deserialized, Signed::deserialize_reader(&mut cursor).unwrap()); + assert_eq!( + deserialized, + Signed::deserialize_reader(&mut cursor).unwrap(), + "borsh::from_slice and Signed::deserialize_reader should produce identical results" + ); - assert_eq!(signed, deserialized); + assert_eq!(signed, deserialized, "round-trip should preserve the original Signed"); } - // Writer failure returns error + // Check writer failure propagation { struct FailingWriter; impl Write for FailingWriter { @@ -150,11 +162,15 @@ mod signed { } let result = random_signed(&mut OsRng).serialize(&mut FailingWriter); - assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), io::ErrorKind::Other); + assert!(result.is_err(), "serialize into a failing writer should error"); + assert_eq!( + result.unwrap_err().kind(), + io::ErrorKind::Other, + "write error kind should be Other" + ); } - // Reader failure returns error + // Check reader failure propagation { struct FailingReader; impl Read for FailingReader { @@ -164,330 +180,503 @@ mod signed { } let result = Signed::deserialize_reader(&mut FailingReader); - assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); + assert!(result.is_err(), "deserialize from a failing reader should error"); + assert_eq!( + result.unwrap_err().kind(), + io::ErrorKind::UnexpectedEof, + "read error kind should be UnexpectedEof" + ); } - // Errors with incomplete data (signer read_G fails) + // Check incomplete data is rejected (signer read_G fails) { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let truncated = &serialized[.. 5]; let mut cursor = std::io::Cursor::new(truncated); let result = Signed::deserialize_reader(&mut cursor); - assert!(result.is_err()); + assert!(result.is_err(), "truncated data should fail to deserialize"); } - // Errors when signer is valid but signature data is missing (SchnorrSignature::read fails) + // Check missing signature data is rejected (SchnorrSignature::read fails) { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let signer_only = &serialized[.. 32]; let mut cursor = std::io::Cursor::new(signer_only); let result = Signed::deserialize_reader(&mut cursor); - assert!(result.is_err()); + assert!(result.is_err(), "signer-only data without signature should fail to deserialize"); } } -} - -#[test] -fn readwrite_transaction() { - let key = random_key(&mut OsRng); - let genesis = new_genesis(); - for mut tx in all_transactions() { - let serialized = ReadWrite::serialize(&tx); - let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); - assert_eq!(tx, deserialized, "ReadWrite failed for {tx:?}"); - - match tx.kind() { - TransactionKind::Signed(_, _) => { - tx.sign(&mut OsRng, genesis, &key); - let serialized = ReadWrite::serialize(&tx); - let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); - assert_eq!(tx, deserialized, "ReadWrite failed after signing for {tx:?}"); - } - _ => {} + #[test] + fn to_tributary_signed_matches_signed() { + let signed = random_signed(&mut OsRng); + for round in all_signing_protocol_rounds() { + let tributary_signed = signed.clone().to_tributary_signed(round); + assert_eq!(signed.signer(), tributary_signed.signer); + assert_eq!(signed.signature, tributary_signed.signature); + assert_eq!(tributary_signed.nonce, round.nonce()); } } + + #[test] + fn default_signer_is_identity() { + let default_signed = Signed::default(); + let identity = ::G::identity(); + assert_eq!(default_signed.signer(), identity); + assert_eq!(default_signed.signature.R, identity); + assert_eq!(default_signed.signature.s, ::F::ZERO); + } } -mod kind { +mod transaction { use super::*; #[test] - fn signed_transactions_matches_kind_and_nonce_and_sig() { - let key = random_key(&mut OsRng); - let genesis = new_genesis(); - - for mut tx in all_signed_transactions() { - tx.sign(&mut OsRng, genesis, &key); - let sig_hash = tx.sig_hash(genesis); - - match tx.kind() { - TransactionKind::Signed(_, signed) => { - assert!( - signed.signature.verify(signed.signer, sig_hash), - "Signature verification failed for {tx:?}" - ); - - let nonce = signed.nonce; - match tx { - Transaction::RemoveParticipant { .. } => { - assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) - } - Transaction::DkgParticipation { .. } => { - assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) - } - Transaction::DkgConfirmationPreprocess { .. } => { - assert_eq!(nonce, SigningProtocolRound::Share.nonce()) + fn readwrite() { + for mut tx in all_transactions() { + let serialized = ReadWrite::serialize(&tx); + + let expected = match &tx { + Transaction::RemoveParticipant { participant, signed } => { + let mut expected = vec![0u8]; + expected.extend(&participant.0); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgParticipation { participation, signed } => { + let mut expected = vec![1u8]; + expected.extend(&(participation.len() as u32).to_le_bytes()); + expected.extend(participation); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => { + let mut expected = vec![2u8]; + expected.extend(&attempt.to_le_bytes()); + expected.extend(preprocess); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgConfirmationShare { attempt, share, signed } => { + let mut expected = vec![3u8]; + expected.extend(&attempt.to_le_bytes()); + expected.extend(share); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::Cosign { substrate_block_hash } => { + let mut expected = vec![4u8]; + expected.extend(&substrate_block_hash.0); + expected + } + Transaction::Cosigned { substrate_block_hash } => { + let mut expected = vec![5u8]; + expected.extend(&substrate_block_hash.0); + expected + } + Transaction::SubstrateBlock { hash } => { + let mut expected = vec![6u8]; + expected.extend(&hash.0); + expected + } + Transaction::Batch { hash } => { + let mut expected = vec![7u8]; + expected.extend(hash); + expected + } + Transaction::Sign { id, attempt, round, data, signed } => { + let mut expected = vec![8u8]; + // Independently encode VariantSignId + match id { + VariantSignId::Cosign(v) => { + expected.push(0u8); + expected.extend(&v.to_le_bytes()); } - Transaction::DkgConfirmationShare { .. } => { - assert_eq!(nonce, SigningProtocolRound::Share.nonce()) + VariantSignId::Batch(h) => { + expected.push(1u8); + expected.extend(h); } - Transaction::Sign { round, .. } => { - assert_eq!(nonce, round.nonce()) + VariantSignId::SlashReport => { + expected.push(2u8); } - Transaction::SlashReport { .. } => { - assert_eq!(nonce, SigningProtocolRound::Preprocess.nonce()) + VariantSignId::Transaction(h) => { + expected.push(3u8); + expected.extend(h); } - _ => panic!("Expected Signed kind for {tx:?}"), } + expected.extend(&attempt.to_le_bytes()); + match round { + SigningProtocolRound::Preprocess => expected.push(0u8), + SigningProtocolRound::Share => expected.push(1u8), + } + // Vec> + expected.extend(&(data.len() as u32).to_le_bytes()); + for d in data { + expected.extend(&(d.len() as u32).to_le_bytes()); + expected.extend(d); + } + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::SlashReport { slash_points, signed } => { + let mut expected = vec![9u8]; + expected.extend(&(slash_points.len() as u32).to_le_bytes()); + for &p in slash_points { + expected.extend(&p.to_le_bytes()); + } + expected.extend(borsh::to_vec(signed).unwrap()); + expected } - _ => panic!("Expected Signed kind for {tx:?}"), + }; + + assert_eq!(serialized, expected, "format mismatch for {tx:?}"); + + let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); + assert_eq!(tx, deserialized); + + match tx.kind() { + TransactionKind::Signed(_, _) => { + tx.sign(&mut OsRng, random_genesis(&mut OsRng), &random_key(&mut OsRng)); + let serialized = ReadWrite::serialize(&tx); + let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); + assert_eq!(tx, deserialized, "ReadWrite failed after signing for {tx:?}"); + } + _ => {} } } } - #[test] - fn provided_transactions_kind() { - let expected: Vec<(&str, Transaction)> = vec![ - ("Cosign", Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }), - ("Cosigned", Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }), - ("SubstrateBlock", Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }), - ("Batch", Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }), - ]; - - for (order, tx) in expected { - match tx.kind() { - TransactionKind::Provided(actual_order) => { - assert_eq!(actual_order, order, "Wrong order for {tx:?}"); + mod kind { + use super::*; + + #[test] + fn signed_transactions_match_kind_and_nonce_and_sig() { + let key = random_key(&mut OsRng); + let genesis = random_genesis(&mut OsRng); + + /// Borsh-encodes a byte-string label: `len(4 LE) || label` + fn borsh_label(label: &[u8]) -> Vec { + let mut out = Vec::new(); + out.extend(&(label.len() as u32).to_le_bytes()); + out.extend(label); + out + } + + for mut tx in all_signed_transactions() { + tx.sign(&mut OsRng, genesis, &key); + + let (expected_order, expected_nonce) = match &tx { + Transaction::RemoveParticipant { participant, .. } => { + let mut order = borsh_label(b"RemoveParticipant"); + order.extend(&participant.0); + (order, 0) + } + Transaction::DkgParticipation { .. } => (borsh_label(b"DkgParticipation"), 0), + Transaction::DkgConfirmationPreprocess { attempt, .. } => { + let mut order = borsh_label(b"DkgConfirmation"); + order.extend(&attempt.to_le_bytes()); + (order, 1) + } + // NOTE: same order AND nonce as DkgConfirmationPreprocess + Transaction::DkgConfirmationShare { attempt, .. } => { + let mut order = borsh_label(b"DkgConfirmation"); + order.extend(&attempt.to_le_bytes()); + (order, 1) + } + Transaction::Sign { id, attempt, round, .. } => { + let mut order = borsh_label(b"Sign"); + // Independently encode VariantSignId + match id { + VariantSignId::Cosign(v) => { + order.push(0u8); + order.extend(&v.to_le_bytes()); + } + VariantSignId::Batch(h) => { + order.push(1u8); + order.extend(h); + } + VariantSignId::SlashReport => { + order.push(2u8); + } + VariantSignId::Transaction(h) => { + order.push(3u8); + order.extend(h); + } + } + order.extend(&attempt.to_le_bytes()); + let nonce = match round { + SigningProtocolRound::Preprocess => 0, + SigningProtocolRound::Share => 1, + }; + (order, nonce) + } + Transaction::SlashReport { .. } => (borsh_label(b"SlashReport"), 0), + other => panic!("all_signed_transactions returned non-signed tx: {other:?}"), + }; + + match tx.kind() { + TransactionKind::Signed(order, signed) => { + assert_eq!(order, expected_order, "Wrong order bytes for {tx:?}"); + assert_eq!(signed.nonce, expected_nonce, "Wrong nonce for {tx:?}"); + assert!( + signed.signature.verify(signed.signer, tx.sig_hash(genesis)), + "Signature verification failed for {tx:?}" + ); + } + other => panic!("Expected Signed kind, got {other:?} for {tx:?}"), + } + } + } + + #[test] + fn provided_transactions_kind() { + for tx in all_provided_transactions() { + let expected_order = match &tx { + Transaction::Cosign { .. } => "Cosign", + Transaction::Cosigned { .. } => "Cosigned", + Transaction::SubstrateBlock { .. } => "SubstrateBlock", + Transaction::Batch { .. } => "Batch", + other => panic!("all_provided_transactions returned non-provided tx: {other:?}"), + }; + + match tx.kind() { + TransactionKind::Provided(actual_order) => { + assert_eq!(actual_order, expected_order, "Wrong order for {tx:?}"); + } + other => panic!("Expected Provided kind, got {other:?} for {tx:?}"), } - other => panic!("Expected Provided kind, got {other:?} for {tx:?}"), } } } -} -mod hash { - use super::*; + mod hash { + use super::*; - #[test] - fn hash_is_deterministic() { - let key = random_key(&mut OsRng); - let genesis = new_genesis(); + #[test] + fn hash_format_and_determinism() { + use blake2::{digest::typenum::U32, Digest as _, Blake2b}; - for tx_template in all_signed_transactions() { - assert_eq!( - tx_template.hash(), - tx_template.hash(), - "Hash not deterministic for {tx_template:?}" - ); + let key = random_key(&mut OsRng); + let genesis = random_genesis(&mut OsRng); - let mut tx1 = tx_template.clone(); - let mut tx2 = tx_template; + for tx in all_transactions() { + assert_eq!(tx.hash(), tx.hash(), "Hash not deterministic for {tx:?}"); - tx1.sign(&mut OsRng, genesis, &key); - tx2.sign(&mut OsRng, genesis, &key); + let serialized = ReadWrite::serialize(&tx); - // Signing produces different random nonces and different signatures, but the hash strips the signature - assert_eq!(tx1.hash(), tx2.hash(), "Hashes should be equal despite different signatures"); + let (hash_input, is_signed) = match &tx { + // Signed txs: strip the last 64 bytes (signature R || s) + Transaction::RemoveParticipant { signed, .. } | + Transaction::DkgParticipation { signed, .. } | + Transaction::DkgConfirmationPreprocess { signed, .. } | + Transaction::DkgConfirmationShare { signed, .. } | + Transaction::Sign { signed, .. } | + Transaction::SlashReport { signed, .. } => { + // Verify the stripped bytes are exactly the signature + let sig_bytes = signed.signature.serialize(); + assert_eq!( + &serialized[serialized.len() - 64 ..], + sig_bytes.as_slice(), + "last 64 bytes should be signature R || s for {tx:?}" + ); + (&serialized[.. serialized.len() - 64], true) + } + // Provided txs: hash the full serialization + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => (&serialized[..], false), + }; + + let expected_hash: [u8; 32] = Blake2b::::digest(hash_input).into(); + assert_eq!(tx.hash(), expected_hash, "Hash format mismatch for {tx:?}"); + + // For signed txs: different signatures should produce the same hash + if is_signed { + let mut tx1 = tx.clone(); + let mut tx2 = tx.clone(); + tx1.sign(&mut OsRng, genesis, &key); + tx2.sign(&mut OsRng, genesis, &key); + assert_eq!(tx1.hash(), tx2.hash(), "Hashes should be equal despite different signatures"); + } + } } - } - #[test] - fn hash_differs_for_distinct_transactions() { - let txs = all_transactions(); - for i in 0 .. txs.len() { - for j in (i + 1) .. txs.len() { - assert_ne!( - txs[i].hash(), - txs[j].hash(), - "Distinct TXs should have different hashes: {:?} vs {:?}", - txs[i], - txs[j] - ); + #[test] + fn hash_differs_for_distinct_transactions() { + let txs = all_transactions(); + for i in 0 .. txs.len() { + for j in (i + 1) .. txs.len() { + assert_ne!( + txs[i].hash(), + txs[j].hash(), + "Distinct TXs should have different hashes: {:?} vs {:?}", + txs[i], + txs[j] + ); + } } } } -} -#[test] -fn tx_verify() { - // All default transactions are valid - { + #[test] + fn verify() { + let max = usize::from(KeyShares::MAX_PER_SET); + for tx in all_transactions() { + // All default transactions should be valid assert_eq!(tx.verify(), Ok(()), "verify() rejected valid tx: {tx:?}"); - } - } - { - // Transaction::Sign with data == KeyShares::MAX_PER_SET passes - assert_eq!( - Transaction::Sign { - id: VariantSignId::Transaction([0; 32]), - attempt: 0, - round: SigningProtocolRound::Preprocess, - data: vec![vec![]; usize::from(KeyShares::MAX_PER_SET)], - signed: Signed::default(), - } - .verify(), - Ok(()) - ); - // Transaction::Sign with data > KeyShares::MAX_PER_SET fails - assert_eq!( - Transaction::Sign { - id: VariantSignId::Transaction([0; 32]), - attempt: 0, - round: SigningProtocolRound::Preprocess, - data: vec![vec![]; usize::from(KeyShares::MAX_PER_SET) + 1], - signed: Signed::default(), + // Test boundary conditions per variant + match &tx { + // Fixed-length: no validation beyond structure + Transaction::RemoveParticipant { .. } | + Transaction::DkgParticipation { .. } | + Transaction::DkgConfirmationPreprocess { .. } | + Transaction::DkgConfirmationShare { .. } => {} + + // Provided: no validation beyond structure + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => {} + + // Sign: data.len() must be <= KeyShares::MAX_PER_SET + Transaction::Sign { id, attempt, round, signed, .. } => { + let with_data = |data| Transaction::Sign { + id: *id, + attempt: *attempt, + round: *round, + data, + signed: *signed, + }; + assert_eq!(with_data(vec![vec![]; 0]).verify(), Ok(())); + assert_eq!(with_data(vec![vec![]; OsRng.next_u32() as usize % max]).verify(), Ok(())); + assert_eq!(with_data(vec![vec![]; max]).verify(), Ok(())); + assert_eq!( + with_data(vec![vec![]; max + 1]).verify(), + Err(TransactionError::InvalidContent) + ); + } + + // SlashReport: slash_points.len() must be <= KeyShares::MAX_PER_SET + Transaction::SlashReport { signed, .. } => { + let with_points = + |points| Transaction::SlashReport { slash_points: points, signed: *signed }; + assert_eq!(with_points(vec![0; 0]).verify(), Ok(())); + assert_eq!(with_points(vec![0; OsRng.next_u32() as usize % max]).verify(), Ok(())); + assert_eq!(with_points(vec![0; max]).verify(), Ok(())); + assert_eq!(with_points(vec![0; max + 1]).verify(), Err(TransactionError::InvalidContent)); + } } - .verify(), - Err(TransactionError::InvalidContent) - ); + } } - { - // Transaction::SlashReport with slash_points == KeyShares::MAX_PER_SET passes - let slash_at = Transaction::SlashReport { - slash_points: vec![0; usize::from(KeyShares::MAX_PER_SET)], - signed: Signed::default(), - }; - assert_eq!(slash_at.verify(), Ok(())); - - // Transaction::SlashReport with slash_points == KeyShares::MAX_PER_SET fails - let slash_over = Transaction::SlashReport { - slash_points: vec![0; usize::from(KeyShares::MAX_PER_SET) + 1], - signed: Signed::default(), - }; - assert_eq!(slash_over.verify(), Err(TransactionError::InvalidContent)); + #[test] + fn topic() { + for tx in all_transactions() { + let expected = match &tx { + Transaction::RemoveParticipant { participant, .. } => { + Some(Topic::RemoveParticipant { participant: *participant }) + } + Transaction::DkgParticipation { .. } => None, + Transaction::DkgConfirmationPreprocess { attempt, .. } => Some(Topic::DkgConfirmation { + attempt: *attempt, + round: SigningProtocolRound::Preprocess, + }), + Transaction::DkgConfirmationShare { attempt, .. } => { + Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share }) + } + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => None, + Transaction::Sign { id, attempt, round, .. } => { + Some(Topic::Sign { id: *id, attempt: *attempt, round: *round }) + } + Transaction::SlashReport { .. } => Some(Topic::SlashReport), + }; + assert_eq!(tx.topic(), expected, "Wrong topic for {tx:?}"); + } } -} -#[test] -fn topic_returns_correct_mapping() { - let participant = random_serai_address(&mut OsRng); - - let tx = Transaction::RemoveParticipant { participant, signed: Signed::default() }; - assert_eq!(tx.topic(), Some(Topic::RemoveParticipant { participant })); - - let tx = Transaction::DkgParticipation { participation: vec![], signed: Signed::default() }; - assert_eq!(tx.topic(), None); - - let tx = Transaction::DkgConfirmationPreprocess { - attempt: 0, - preprocess: [0; 64], - signed: Signed::default(), - }; - assert_eq!( - tx.topic(), - Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }) - ); - - let tx = - Transaction::DkgConfirmationShare { attempt: 0, share: [0; 32], signed: Signed::default() }; - assert_eq!( - tx.topic(), - Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }) - ); - - for tx in all_provided_transactions() { - assert_eq!(tx.topic(), None, "Provided tx should have no topic: {tx:?}"); - } + mod sign { + use super::*; - let id = VariantSignId::Batch([9; 32]); - let tx = Transaction::Sign { - id, - attempt: 0, - round: SigningProtocolRound::Share, - data: vec![], - signed: Signed::default(), - }; - assert_eq!(tx.topic(), Some(Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Share })); - - let tx = Transaction::SlashReport { slash_points: vec![], signed: Signed::default() }; - assert_eq!(tx.topic(), Some(Topic::SlashReport)); -} + #[test] + fn tx_sign() { + let key = random_key(&mut OsRng); + let expected_signer = Ristretto::generator() * key.deref(); + let genesis = random_genesis(&mut OsRng); -mod sign { - use super::*; + // Sets correct signer and produces verifiable signature + for mut tx in all_signed_transactions() { + tx.sign(&mut OsRng, genesis, &key); + let sig_hash = tx.sig_hash(genesis); - #[test] - fn tx_sign() { - let key = random_key(&mut OsRng); - let expected_signer = Ristretto::generator() * key.deref(); - let genesis = new_genesis(); - - // Sets correct signer and produces verifiable signature - for mut tx in all_signed_transactions() { - tx.sign(&mut OsRng, genesis, &key); - let sig_hash = tx.sig_hash(genesis); - - if let TransactionKind::Signed(_, trib_signed) = tx.kind() { - assert_eq!(trib_signed.signer, expected_signer, "Wrong signer for {tx:?}"); - assert!( - trib_signed.signature.verify(trib_signed.signer, sig_hash), - "Signature verification failed for {tx:?}" - ); + if let TransactionKind::Signed(_, tributary_signed) = tx.kind() { + assert_eq!(tributary_signed.signer, expected_signer, "Wrong signer for {tx:?}"); + assert!( + tributary_signed.signature.verify(tributary_signed.signer, sig_hash), + "Signature verification failed for {tx:?}" + ); + } } - } - // Wrong genesis fails verification - { - let mut tx = Transaction::RemoveParticipant { - participant: random_serai_address(&mut OsRng), - signed: Signed::default(), - }; - tx.sign(&mut OsRng, new_genesis(), &key); - - let wrong_challenge = tx.sig_hash([1; 32]); - if let TransactionKind::Signed(_, trib_signed) = tx.kind() { - assert!( - !trib_signed.signature.verify(trib_signed.signer, wrong_challenge), - "Signature should not verify with wrong genesis" - ); + // Wrong genesis fails verification + { + let mut tx = Transaction::RemoveParticipant { + participant: random_serai_address(&mut OsRng), + signed: random_signed(&mut OsRng), + }; + let genesis = random_genesis(&mut OsRng); + tx.sign(&mut OsRng, genesis, &key); + + let mut wrong_genesis = random_genesis(&mut OsRng); + if wrong_genesis == genesis { + wrong_genesis[0] ^= 1; + } + let wrong_challenge = tx.sig_hash(wrong_genesis); + if let TransactionKind::Signed(_, trib_signed) = tx.kind() { + assert_eq!( + trib_signed.signature.verify(trib_signed.signer, wrong_challenge), + false, + "Signature should not verify with wrong genesis" + ); + } } } - } - #[test] - #[should_panic(expected = "signing Cosign transaction (provided)")] - fn sign_panics_on_cosign() { - let key = random_key(&mut OsRng); - let mut tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, new_genesis(), &key); - } + #[test] + #[should_panic(expected = "signing Cosign transaction (provided)")] + fn panics_on_cosign() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + } - #[test] - #[should_panic(expected = "signing Cosigned transaction (provided)")] - fn sign_panics_on_cosigned() { - let key = random_key(&mut OsRng); - let mut tx = Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, new_genesis(), &key); - } + #[test] + #[should_panic(expected = "signing Cosigned transaction (provided)")] + fn panics_on_cosigned() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + } - #[test] - #[should_panic(expected = "signing SubstrateBlock transaction (provided)")] - fn sign_panics_on_substrate_block() { - let key = random_key(&mut OsRng); - let mut tx = Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, new_genesis(), &key); - } + #[test] + #[should_panic(expected = "signing SubstrateBlock transaction (provided)")] + fn panics_on_substrate_block() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }; + tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + } - #[test] - #[should_panic(expected = "signing Batch transaction (provided)")] - fn sign_panics_on_batch() { - let key = random_key(&mut OsRng); - let mut tx = Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }; - tx.sign(&mut OsRng, new_genesis(), &key); + #[test] + #[should_panic(expected = "signing Batch transaction (provided)")] + fn panics_on_batch() { + let key = random_key(&mut OsRng); + let mut tx = Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }; + tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + } } } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 23450d7a6..b05b23537 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -94,6 +94,13 @@ impl Default for Signed { } } +/// The type used for preprocesses in the signing protocol. +pub type Preprocess = [u8; 64]; +/// The type used for shares in the signing protocol. +pub type Share = [u8; 32]; +/// The type used for either shares or preprocesses in the signing protocol. +pub type GenericDataset = Vec>; + /// The Tributary transaction definition used by Serai #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum Transaction { @@ -117,7 +124,7 @@ pub enum Transaction { /// The attempt number of this signing protocol attempt: u32, /// The preprocess - preprocess: [u8; 64], + preprocess: Preprocess, /// The transaction's signer and signature signed: Signed, }, @@ -126,7 +133,7 @@ pub enum Transaction { /// The attempt number of this signing protocol attempt: u32, /// The signature share - share: [u8; 32], + share: Share, /// The transaction's signer and signature signed: Signed, }, diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 1ec5b1f1c..908c926f8 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -1,11 +1,14 @@ //! Test helpers for generating random instances of primitive types. +use alloc::{vec, vec::Vec}; use rand_core::{RngCore, CryptoRng}; use crate::{ BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::{Public, ExternalKey}, + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, }; /// Generate a random 32-byte array. @@ -22,6 +25,14 @@ pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { bytes } +/// Generate a random `Vec` with a random length between 1 and 128. +pub fn random_vec_u8(rng: &mut R) -> Vec { + let len = (rng.next_u32() % 128) as usize + 1; + let mut bytes = vec![0u8; len]; + rng.fill_bytes(&mut bytes); + bytes +} + /// Generate a random [`ExternalAddress`]. pub fn random_external_address(rng: &mut R) -> ExternalAddress { ExternalAddress::try_from(random_bytes_32(rng).to_vec()).unwrap() @@ -58,3 +69,32 @@ pub fn random_block_hash(rng: &mut R) -> BlockHash { pub fn random_global_session(rng: &mut R) -> [u8; 32] { random_bytes_32(rng) } + +/// Generate a random genesis +pub fn random_genesis(rng: &mut R) -> [u8; 32] { + random_bytes_32(rng) +} + +/// Generate a random block number. +pub fn random_block_number(rng: &mut R) -> u64 { + rng.next_u64() +} + +/// Generate a random [`ExternalNetworkId`]. +pub fn random_external_network_id(rng: &mut R) -> ExternalNetworkId { + let all: Vec<_> = ExternalNetworkId::all().collect(); + all[(rng.next_u32() as usize) % all.len()] +} + +/// Generate a random [`ExternalValidatorSet`]. +pub fn random_validator_set(rng: &mut R) -> ExternalValidatorSet { + ExternalValidatorSet { + network: random_external_network_id(rng), + session: Session(rng.next_u32()), + } +} + +/// A default [`ExternalValidatorSet`] for tests where the set value doesn't matter. +pub fn default_test_validator_set() -> ExternalValidatorSet { + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } +} diff --git a/tests/substrate/Cargo.toml b/tests/substrate/Cargo.toml index bb6bc8430..325bc4b61 100644 --- a/tests/substrate/Cargo.toml +++ b/tests/substrate/Cargo.toml @@ -25,7 +25,6 @@ dockertest = "0.5" serai-docker-tests = { path = "../docker" } rand_core = { version = "0.6", default-features = false, features = ["std"] } -serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } [dev-dependencies] rand = { version = "0.8", default-features = false, features = ["std"] } diff --git a/tests/substrate/src/lib.rs b/tests/substrate/src/lib.rs index dda2d6f1b..532d201be 100644 --- a/tests/substrate/src/lib.rs +++ b/tests/substrate/src/lib.rs @@ -4,21 +4,6 @@ use serai_client_serai::Serai; use dockertest::{StartPolicy, PullPolicy, Image, TestBodySpecification, DockerOperations}; -use rand_core::{RngCore, CryptoRng}; -use serai_primitives::{address::SeraiAddress, BlockHash}; - -pub fn random_serai_address(rng: &mut R) -> SeraiAddress { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - SeraiAddress(key) -} - -pub fn random_block_hash(rng: &mut R) -> BlockHash { - let mut hash = [0; 32]; - rng.fill_bytes(&mut hash); - BlockHash(hash) -} - pub struct Handle(String); pub fn composition(name: &str, logs_path: String) -> (TestBodySpecification, Handle) { From 58c5e32dba716af8243276bb771e5f5018f46490 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Mon, 6 Apr 2026 17:12:28 -0400 Subject: [PATCH 46/71] feat(coordinator/tributary): adding changes and new test cases --- coordinator/tributary/src/db.rs | 65 +- coordinator/tributary/src/lib.rs | 4 + coordinator/tributary/src/tests/db.rs | 1265 ++++++++--------- coordinator/tributary/src/tests/mod.rs | 142 +- coordinator/tributary/src/tests/scan_block.rs | 1083 ++++++++------ .../tributary/src/tests/transaction.rs | 384 +++-- coordinator/tributary/src/transaction.rs | 20 +- 7 files changed, 1647 insertions(+), 1316 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 1b4896870..cfbdfcc8b 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -51,8 +51,8 @@ pub(crate) enum Participating { Everyone, } -pub(crate) fn required_participation(n: u16) -> Result { - Ok(n.checked_mul(2).ok_or("total_weight * 2 overflows u16")? / 3 + 1) +pub(crate) fn required_participation(n: u16) -> u16 { + n.checked_mul(2).expect(&format!("required_participation overflowed: {n} * 2")) / 3 + 1 } impl Topic { @@ -60,17 +60,16 @@ impl Topic { pub(crate) fn next_attempt_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { - attempt: attempt.checked_add(1).unwrap_or(0), + attempt: attempt.checked_add(1)?, round: SigningProtocolRound::Preprocess, }), - Topic::SlashReport => None, Topic::Sign { id, attempt, round: _ } => Some(Topic::Sign { id, - attempt: attempt.checked_add(1).unwrap_or(0), + attempt: attempt.checked_add(1)?, round: SigningProtocolRound::Preprocess, }), + Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -78,10 +77,9 @@ impl Topic { pub(crate) fn reattempt_topic(self) -> Option<(u32, Topic)> { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { - let next_attempt = attempt.checked_add(1).unwrap_or(0); + let next_attempt = attempt.checked_add(1)?; Some(( next_attempt, Topic::DkgConfirmation { @@ -92,10 +90,9 @@ impl Topic { } SigningProtocolRound::Share => None, }, - Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { - let next_attempt = attempt.checked_add(1).unwrap_or(0); + let next_attempt = attempt.checked_add(1)?; Some(( next_attempt, Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, @@ -103,6 +100,7 @@ impl Topic { } SigningProtocolRound::Share => None, }, + Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -112,10 +110,8 @@ impl Topic { pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, - Topic::DkgConfirmation { .. } => None, - Topic::SlashReport => None, Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }), + Topic::RemoveParticipant { .. } | Topic::DkgConfirmation { .. } | Topic::SlashReport => None, } } @@ -131,7 +127,6 @@ impl Topic { ) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round: _ } => Some({ let id = { let mut id = [0; 32]; @@ -141,8 +136,7 @@ impl Topic { }; SignId { session: set.session, id, attempt } }), - Topic::SlashReport => None, - Topic::Sign { .. } => None, + Topic::RemoveParticipant { .. } | Topic::SlashReport | Topic::Sign { .. } => None, } } @@ -152,20 +146,19 @@ impl Topic { pub(crate) fn preceding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => None, SigningProtocolRound::Share => { Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) } }, - Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => None, SigningProtocolRound::Share => { Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) } }, + Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -175,20 +168,19 @@ impl Topic { pub(crate) fn succeeding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) } SigningProtocolRound::Share => None, }, - Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) } SigningProtocolRound::Share => None, }, + Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -210,10 +202,8 @@ impl Topic { pub(crate) fn participating(&self) -> Participating { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } => Participating::Everyone, - Topic::DkgConfirmation { .. } => Participating::Participated, - Topic::SlashReport => Participating::Everyone, - Topic::Sign { .. } => Participating::Participated, + Topic::RemoveParticipant { .. } | Topic::SlashReport => Participating::Everyone, + Topic::DkgConfirmation { .. } | Topic::Sign { .. } => Participating::Participated, } } } @@ -280,12 +270,12 @@ db_channel!( // 5 minutes #[cfg(not(feature = "longer-reattempts"))] -const BASE_REATTEMPT_DELAY: u32 = +pub(crate) const BASE_REATTEMPT_DELAY: u32 = (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); // 10 minutes, intended for latent environments like the GitHub CI #[cfg(feature = "longer-reattempts")] -const BASE_REATTEMPT_DELAY: u32 = +pub(crate) const BASE_REATTEMPT_DELAY: u32 = (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); pub(crate) struct TributaryDb; @@ -439,6 +429,11 @@ impl TributaryDb { ) -> DataSet { // This function will only be called once for a (validator, topic) tuple due to how we handle // nonces on transactions (deterministically to the topic) + assert!( + txn.get(Accumulated::::key(set, topic, validator)).is_none(), + "accumulate called twice for the same (validator, topic) tuple: \ + the nonce system should have prevented this" + ); let accumulated_weight = AccumulatedWeight::get(txn, set, topic); if topic.requires_recognition() && accumulated_weight.is_none() { @@ -468,13 +463,7 @@ impl TributaryDb { } } - let required_participation = match required_participation(total_weight) { - Ok(val) => val, - Err(e) => { - serai_env::error!("required_participation({total_weight}) failed: {e}"); - return DataSet::None; - } - }; + let required_participation = required_participation(total_weight); // The complete lack of validation on the data by these NOPs opens the potential for spam here @@ -490,7 +479,9 @@ impl TributaryDb { } // Accumulate the data - accumulated_weight += validator_weight; + accumulated_weight = accumulated_weight.checked_add(validator_weight).expect(&format!( + "accumulated_weight {accumulated_weight} overflowed adding validator_weight {validator_weight}" + )); AccumulatedWeight::set(txn, set, topic, &accumulated_weight); Accumulated::set(txn, set, topic, validator, data); @@ -500,9 +491,11 @@ impl TributaryDb { let reattempt_topic = topic.reattempt_topic(); if let Some((attempt, reattempt_topic)) = reattempt_topic { // Linearly scale the time for the protocol with the attempt number - let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY); + let blocks_till_reattempt = u64::from(attempt) * u64::from(BASE_REATTEMPT_DELAY); - let recognize_at = block_number + blocks_till_reattempt; + let recognize_at = block_number.checked_add(blocks_till_reattempt).expect(&format!( + "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}" + )); let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1)); queued.push(reattempt_topic); Reattempt::set(txn, set, recognize_at, &queued); diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 96f8c7edf..0279287ec 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -193,6 +193,10 @@ impl ScanBlock<'_, TD, TDT, P> { data: &D, signer: SeraiAddress, ) -> Option<(SignId, HashMap>)> { + assert!( + matches!(topic, Topic::DkgConfirmation { .. }), + "accumulate_dkg_confirmation called with non-DkgConfirmation topic: {topic:?}" + ); match TributaryDb::accumulate::( self.tributary_txn, self.set.set, diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 2fafabd4c..13c234ba1 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,6 +1,5 @@ -use rand::{RngCore, rngs::OsRng}; - -use messages::sign::VariantSignId; +use rand::{Rng, RngCore, rngs::OsRng}; +use messages::sign::{SignId, VariantSignId}; use serai_db::{Db, DbTxn, MemDb}; use serai_primitives::{ @@ -8,65 +7,55 @@ use serai_primitives::{ validator_sets::ExternalValidatorSet, test_helpers::{ random_bytes_32, random_bytes_64, random_serai_address, random_block_number, - default_test_validator_set, random_validator_set, random_vec_u8, + default_test_validator_set, random_validator_set, random_vec_u8, random_block_hash, }, }; use crate::{ db::*, - tests::random_transaction_id, - transaction::{GenericDataset, Preprocess, Share, SigningProtocolRound}, + tests::*, + transaction::{RoundPayloads, Preprocess, Share, SigningProtocolRound}, }; -fn all_topics() -> Vec { +/// One of each topic kind, and attempts: at 0, a random attempt, and u32::MAX. +fn all_topics_and_attempts() -> Vec { + let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); vec![ + // RemoveParticipant Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + // DkgConfirmation Preprocess Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess }, + // DkgConfirmation Share Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }, + Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Share }, + Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share }, + // SlashReport Topic::SlashReport, + // Sign Preprocess Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Preprocess, }, - Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share }, - ] -} - -/// A random Share topic that has a preceding Preprocess topic. -fn random_share_topic_with_preceding() -> Topic { - if OsRng.next_u64() % 2 == 0 { - Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share } - } else { - Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share } - } -} - -/// A random topic with `attempt = u32::MAX` and `round = Preprocess` that has -/// `reattempt_topic()` and `next_attempt_topic()` returning `Some`. -fn random_reattemptable_topic_at_max_attempt() -> Topic { - if OsRng.next_u64() % 2 == 0 { - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess } - } else { Topic::Sign { id: random_transaction_id(), - attempt: u32::MAX, + attempt: random_attempt, round: SigningProtocolRound::Preprocess, - } - } -} - -fn all_topics_at_max_attempts() -> Vec { - vec![ - Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess }, - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share }, - Topic::SlashReport, + }, Topic::Sign { id: random_transaction_id(), attempt: u32::MAX, round: SigningProtocolRound::Preprocess, }, + // Sign Share + Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share }, + Topic::Sign { + id: random_transaction_id(), + attempt: random_attempt, + round: SigningProtocolRound::Share, + }, Topic::Sign { id: random_transaction_id(), attempt: u32::MAX, @@ -75,6 +64,34 @@ fn all_topics_at_max_attempts() -> Vec { ] } +/// Share-round topics only, with attempts: at 0, random, and u32::MAX. +fn all_share_topics_and_attempts() -> Vec { + all_topics_and_attempts() + .into_iter() + .filter(|t| { + matches!( + t, + Topic::DkgConfirmation { round: SigningProtocolRound::Share, .. } | + Topic::Sign { round: SigningProtocolRound::Share, .. } + ) + }) + .collect() +} + +/// Preprocess-round topics only, with attempts: at 0, random, and u32::MAX. +fn all_preprocess_topics_and_attempts() -> Vec { + all_topics_and_attempts() + .into_iter() + .filter(|t| { + matches!( + t, + Topic::DkgConfirmation { round: SigningProtocolRound::Preprocess, .. } | + Topic::Sign { round: SigningProtocolRound::Preprocess, .. } + ) + }) + .collect() +} + type NoEachFn = fn(usize, &DataSet); /// Cross threshold by accumulating from all validators, returning the final result. @@ -114,119 +131,82 @@ where result } -#[test] -fn required_participation() { - use crate::db::required_participation; +mod required_participation_tests { + use super::*; - assert_eq!(required_participation(0), Ok(1)); - // Random value within non-overflow range - let random_n = (OsRng.next_u32() as u16) % (u16::MAX / 2); - assert_eq!(required_participation(random_n), Ok(random_n * 2 / 3 + 1)); + #[test] + fn passes() { + assert_eq!(required_participation(0), 1); - assert!(required_participation(u16::MAX / 2).is_ok()); - assert!(required_participation(u16::MAX / 2 + 1).is_err()); - assert!(required_participation(u16::MAX).is_err()); + // No panics + { + let random_n = (OsRng.next_u32() as u16) % (u16::MAX / 2); + let _ = required_participation(random_n); + let _ = required_participation(u16::MAX / 2); + } + } + + #[test] + #[should_panic = "overflowed"] + fn panics_on_overflow() { + // u16::MAX * 2 overflows u16 + required_participation(u16::MAX); + } } mod topic { - use messages::sign::SignId; use super::*; #[test] fn next_attempt_topic() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), Topic::DkgConfirmation { attempt, .. } => assert_eq!( topic.next_attempt_topic(), - Some(Topic::DkgConfirmation { - attempt: attempt + 1, + attempt.checked_add(1).map(|next| Topic::DkgConfirmation { + attempt: next, round: SigningProtocolRound::Preprocess, }) ), - Topic::SlashReport => assert_eq!(topic.next_attempt_topic(), None), Topic::Sign { id, attempt, .. } => assert_eq!( topic.next_attempt_topic(), - Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess }) - ), - } - } - - for topic in all_topics_at_max_attempts() { - match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.next_attempt_topic(), None), - Topic::DkgConfirmation { .. } => assert_eq!( - topic.next_attempt_topic(), - Some(Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }) - ), - Topic::SlashReport => assert_eq!(topic.next_attempt_topic(), None), - Topic::Sign { id, .. } => assert_eq!( - topic.next_attempt_topic(), - Some(Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess }) + attempt.checked_add(1).map(|next| Topic::Sign { + id, + attempt: next, + round: SigningProtocolRound::Preprocess + }) ), + _ => assert_eq!(topic.next_attempt_topic(), None), } } } #[test] fn reattempt_topic() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), Topic::DkgConfirmation { attempt, round } => match round { - SigningProtocolRound::Preprocess => { - let next_attempt = attempt + 1; - assert_eq!( - topic.reattempt_topic(), - Some(( - next_attempt, - Topic::DkgConfirmation { - attempt: next_attempt, - round: SigningProtocolRound::Preprocess, - }, - )) - ); - } - SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), - }, - Topic::SlashReport => assert_eq!(topic.reattempt_topic(), None), - Topic::Sign { id, attempt, round } => match round { - SigningProtocolRound::Preprocess => { - let next_attempt = attempt + 1; - assert_eq!( - topic.reattempt_topic(), - Some(( - next_attempt, - Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, - )) - ); - } - SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), - }, - } - } - - for topic in all_topics_at_max_attempts() { - match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.reattempt_topic(), None), - Topic::DkgConfirmation { round, .. } => match round { SigningProtocolRound::Preprocess => assert_eq!( topic.reattempt_topic(), - Some(( - 0, - Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, - )) + attempt.checked_add(1).map(|next| { + ( + next, + Topic::DkgConfirmation { attempt: next, round: SigningProtocolRound::Preprocess }, + ) + }) ), SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, - Topic::SlashReport => assert_eq!(topic.reattempt_topic(), None), - Topic::Sign { id, round, .. } => match round { + Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => assert_eq!( topic.reattempt_topic(), - Some((0, Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess })) + attempt.checked_add(1).map(|next| { + (next, Topic::Sign { id, attempt: next, round: SigningProtocolRound::Preprocess }) + }) ), SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, + _ => assert_eq!(topic.reattempt_topic(), None), } } } @@ -234,7 +214,7 @@ mod topic { #[test] fn sign_id() { let set = random_validator_set(&mut OsRng); - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { Topic::Sign { id, attempt, round: _ } => { assert_eq!(topic.sign_id(set), Some(SignId { session: set.session, id, attempt })) @@ -247,7 +227,7 @@ mod topic { #[test] fn dkg_confirmation_sign_id() { let set = random_validator_set(&mut OsRng); - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { Topic::DkgConfirmation { attempt, round: _ } => assert_eq!( topic.dkg_confirmation_sign_id(set), @@ -268,24 +248,17 @@ mod topic { #[test] fn preceding_topic() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.preceding_topic(), None), - Topic::DkgConfirmation { attempt, round } => match round { - SigningProtocolRound::Preprocess => assert_eq!(topic.preceding_topic(), None), - SigningProtocolRound::Share => assert_eq!( - topic.preceding_topic(), - Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) - ), - }, - Topic::SlashReport => assert_eq!(topic.preceding_topic(), None), - Topic::Sign { id, attempt, round } => match round { - SigningProtocolRound::Preprocess => assert_eq!(topic.preceding_topic(), None), - SigningProtocolRound::Share => assert_eq!( - topic.preceding_topic(), - Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) - ), - }, + Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share } => assert_eq!( + topic.preceding_topic(), + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) + ), + Topic::Sign { id, attempt, round: SigningProtocolRound::Share } => assert_eq!( + topic.preceding_topic(), + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) + ), + _ => assert_eq!(topic.preceding_topic(), None), } // preceding and succeeding should be inverses @@ -300,62 +273,50 @@ mod topic { #[test] fn succeeding_topic() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.succeeding_topic(), None), - Topic::DkgConfirmation { attempt, round } => match round { - SigningProtocolRound::Preprocess => assert_eq!( - topic.succeeding_topic(), - Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) - ), - - SigningProtocolRound::Share => assert_eq!(topic.succeeding_topic(), None), - }, - Topic::SlashReport => assert_eq!(topic.succeeding_topic(), None), - Topic::Sign { id, attempt, round } => match round { - SigningProtocolRound::Preprocess => assert_eq!( - topic.succeeding_topic(), - Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) - ), - SigningProtocolRound::Share => assert_eq!(topic.succeeding_topic(), None), - }, + Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess } => assert_eq!( + topic.succeeding_topic(), + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) + ), + Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess } => assert_eq!( + topic.succeeding_topic(), + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) + ), + _ => assert_eq!(topic.succeeding_topic(), None), } } } #[test] fn requires_recognition() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => assert_eq!(topic.requires_recognition(), false), Topic::DkgConfirmation { attempt, .. } => { assert_eq!(topic.requires_recognition(), attempt != 0) } - Topic::SlashReport => assert_eq!(topic.requires_recognition(), false), Topic::Sign { .. } => assert_eq!(topic.requires_recognition(), true), + _ => assert_eq!(topic.requires_recognition(), false), } } } #[test] fn participating() { - for topic in all_topics() { + for topic in all_topics_and_attempts() { match topic { - Topic::RemoveParticipant { .. } => { + Topic::RemoveParticipant { .. } | Topic::SlashReport => { assert_eq!(topic.participating(), Participating::Everyone) } - Topic::DkgConfirmation { .. } => { + Topic::DkgConfirmation { .. } | Topic::Sign { .. } => { assert_eq!(topic.participating(), Participating::Participated) } - Topic::SlashReport => assert_eq!(topic.participating(), Participating::Everyone), - Topic::Sign { .. } => assert_eq!(topic.participating(), Participating::Participated), } } } } mod tributary_db { - use serai_primitives::test_helpers::random_block_hash; use super::*; #[test] @@ -365,18 +326,13 @@ mod tributary_db { let block_hash1 = random_block_hash(&mut OsRng); let block_number1 = random_block_number(&mut OsRng); - let expected_topic = Topic::Sign { - id: VariantSignId::Cosign(block_number1), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number1)); // Recognizes topic { let mut txn = db.txn(); TributaryDb::start_cosigning(&mut txn, set, block_hash1, block_number1); - assert!(TributaryDb::try_recv_topic_requiring_recognition(&mut txn, set).is_some()); - assert!(TributaryDb::recognized(&txn, set, expected_topic)); + assert_cosigning_invariants(&mut txn, set, block_hash1, block_number1); txn.commit(); } @@ -427,11 +383,7 @@ mod tributary_db { assert!(TributaryDb::recognized( &txn, set, - Topic::Sign { - id: VariantSignId::Cosign(block_number2), - attempt: 0, - round: SigningProtocolRound::Preprocess, - } + expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number2)) )); // Previous topic also remains recognized assert!(TributaryDb::recognized(&txn, set, expected_topic)); @@ -445,19 +397,19 @@ mod tributary_db { let _ = env_logger::try_init(); let set = random_validator_set(&mut OsRng); - let reattemptable_topics: Vec = all_topics() + let reattemptable_topics: Vec = all_topics_and_attempts() .into_iter() .filter_map(|t| t.reattempt_topic().map(|(_, reattempt_topic)| reattempt_topic)) .collect(); serai_env::info!( "start_of_block fuzz: reattemptable_topics={reattemptable_topics:?}, \ - all_topics count={}", - all_topics().len() + all_topics_and_attempts count={}", + all_topics_and_attempts().len() ); for iteration in 0 .. 100 { - for topic in all_topics() { + for topic in all_topics_and_attempts() { // Fresh DB per topic so recognized state doesn't leak between iterations let mut db = MemDb::new(); let mut txn = db.txn(); @@ -533,63 +485,58 @@ mod tributary_db { fn default_accumulate_setup( ) -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16) { let set = random_validator_set(&mut OsRng); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); + let (_, validators, _, total_weight) = setup_test_validators_and_weights(); let validator = validators[0]; - let total_weight = 3; let validator_weight = 1; (set, validator, validators, total_weight, validator_weight) } mod accumulate_preceding_topic { - use super::*; - /// Set up a random Share topic (which requires participation in a preceding - /// Preprocess topic) with 3 validators of weight 1 each. - fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { - let (set, validator, validators, total_weight, validator_weight) = - default_accumulate_setup(); - let share_topic = random_share_topic_with_preceding(); - (set, validator, validators, total_weight, validator_weight, share_topic) - } - #[test] fn no_preceding_data_slashes_validator() { - let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); - let mut db = MemDb::new(); - let mut txn = db.txn(); + for share_topic in all_share_topics_and_attempts() { + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); + let mut db = MemDb::new(); + let mut txn = db.txn(); - // Recognize the share topic so we reach the preceding-topic check - if share_topic.requires_recognition() { - TributaryDb::recognize_topic(&mut txn, set, share_topic); - } + // Recognize the share topic so we reach the preceding-topic check + if share_topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, share_topic); + } - // Do not store any preceding Preprocess data - // Validator should be slashed with reason: - // "participated in topic without participating in prior" - let result = TributaryDb::accumulate::( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - share_topic, - validator, - validator_weight, - &random_bytes_32(&mut OsRng), - ); - txn.commit(); + // Do not store any preceding Preprocess data + // Validator should be slashed with reason: + // "participated in topic without participating in prior" + let result = TributaryDb::accumulate::( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + share_topic, + validator, + validator_weight, + &random_bytes_32(&mut OsRng), + ); + txn.commit(); - assert!(matches!(result, DataSet::None)); - assert!(TributaryDb::is_fatally_slashed(&db, set, validator)); + assert!(matches!(result, DataSet::None)); + assert!( + TributaryDb::is_fatally_slashed(&db, set, validator), + "validator should be slashed for not participating in prior: {share_topic:?}" + ); + } } #[test] fn preceding_topic_passes_existence_check() { - // Different types: DkgConfirmation stores Preprocess, accumulates Share - { - let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); + // Different types: stores Preprocess, accumulates Share + for share_topic in all_share_topics_and_attempts() { + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); @@ -630,16 +577,26 @@ mod tributary_db { assert!(Accumulated::::get(&db, set, share_topic, validator).is_some()); } - // Same types: Sign stores GenericDataset for both preprocess and share + // Same types: stores type of RoundPayloads for both Preprocess and Share. + // Only topics where the preprocess data survives after threshold + // (reattempt exists). + for share_topic in all_share_topics_and_attempts() + .into_iter() + .filter(|t| t.preceding_topic().unwrap().reattempt_topic().is_some()) { - let (set, validator, validators, total_weight, validator_weight, share_topic) = setup(); + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); let preprocess_topic = share_topic.preceding_topic().unwrap(); - // Recognize and accumulate the preprocess to threshold + if preprocess_topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); + } + + // Accumulate the preprocess to threshold accumulate_to_threshold( &mut txn, set, @@ -648,12 +605,12 @@ mod tributary_db { random_block_number(&mut OsRng), preprocess_topic, |_| vec![random_vec_u8(&mut OsRng)], - None::)>, + None::)>, ); - // Accumulate a share with the same GenericDataset type - let share_data: GenericDataset = vec![random_vec_u8(&mut OsRng)]; - let result = TributaryDb::accumulate::( + // Accumulate a share with the same RoundPayloads type + let share_data: RoundPayloads = vec![random_vec_u8(&mut OsRng)]; + let result = TributaryDb::accumulate::( &mut txn, set, &validators, @@ -673,7 +630,7 @@ mod tributary_db { ); assert!(matches!(result, DataSet::None), "below threshold (1 of 3)"); assert_eq!( - Accumulated::::get(&db, set, share_topic, validator), + Accumulated::::get(&db, set, share_topic, validator), Some(share_data) ); } @@ -683,384 +640,383 @@ mod tributary_db { mod accumulate_next_attempt_topic { use super::*; - /// Set up a random reattemptable topic with `attempt = u32::MAX`. - fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { - let (set, validator, validators, total_weight, validator_weight) = - default_accumulate_setup(); - let topic = random_reattemptable_topic_at_max_attempt(); - (set, validator, validators, total_weight, validator_weight, topic) - } - #[test] - fn accumulates_normally_despite_overflow() { - let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); - let mut db = MemDb::new(); - let block_number = random_block_number(&mut OsRng); - - { - let mut txn = db.txn(); + fn accumulates_to_threshold() { + for topic in all_preprocess_topics_and_attempts() { + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); + let mut db = MemDb::new(); + let block_number = random_block_number(&mut OsRng); - // DkgConfirmation with attempt = u32::MAX requires recognition - TributaryDb::recognize_topic(&mut txn, set, topic); + { + let mut txn = db.txn(); + if topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, topic); + } - // Accumulate from all 3 validators to cross threshold - let result = accumulate_to_threshold( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - |i| [i as u8; 32], - Some(|i: usize, result: &DataSet| { - if i < 2 { - assert!(matches!(result, DataSet::None)); - } else { - // Third validator crosses the threshold - match result { - DataSet::Participating(data_set) => assert_eq!(data_set.len(), 3), - DataSet::None => panic!("expected Participating after crossing threshold"), + let result = accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| [i as u8; 32], + Some(|i: usize, result: &DataSet| { + if i < 2 { + assert!(matches!(result, DataSet::None)); + } else { + match result { + DataSet::Participating(data_set) => assert_eq!(data_set.len(), 3), + DataSet::None => panic!("expected Participating after crossing threshold"), + } } - } - }), - ); - assert!(matches!(result, DataSet::Participating(_))); - - // reattempt_topic() wraps attempt u32::MAX to 0, so blocks_till_reattempt = 0. - // A reattempt is queued at block_number itself. - assert!(Reattempt::get(&txn, set, block_number).is_some()); - // But not at any subsequent block - for offset in 1 ..= 3 { - assert!(Reattempt::get(&txn, set, block_number.wrapping_add(offset)).is_none()); - } - - txn.commit(); - } - - for (i, v) in validators.iter().enumerate() { - assert!(!TributaryDb::is_fatally_slashed(&db, set, *v)); - assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); - } - - assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(3)); - } - - /// When attempt 0 has already accumulated data, accumulating for attempt u32::MAX should be - /// NOP'd because `next_attempt_topic(u32::MAX)` wraps to attempt 0, which already exists. - #[test] - fn attempt_max_nopd_when_attempt_zero_exists() { - let (set, _validator, validators, total_weight, validator_weight, topic_max) = setup(); - let topic_0 = topic_max.next_attempt_topic().unwrap(); - - assert_eq!( - topic_0, - Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, - ); + }), + ); + assert!(matches!(result, DataSet::Participating(_))); - let mut db = MemDb::new(); + txn.commit(); + } - // First: accumulate for attempt 0 (below threshold, just one validator) - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic_0); - let result = TributaryDb::accumulate::( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic_0, - validators[0], - validator_weight, - &random_bytes_32(&mut OsRng), - ); - assert!(matches!(result, DataSet::None)); - txn.commit(); - } + let has_reattempt = topic.reattempt_topic().is_some(); - // Attempt 0 has accumulated weight - assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); + for v in &validators { + assert!(!TributaryDb::is_fatally_slashed(&db, set, *v)); + if has_reattempt { + assert!( + Accumulated::::get(&db, set, topic, *v).is_some(), + "data should be preserved when reattempt exists: {topic:?}" + ); + } else { + assert!( + Accumulated::::get(&db, set, topic, *v).is_none(), + "data should be cleaned up when no reattempt: {topic:?}" + ); + } + } - // Now try to accumulate for attempt u32::MAX - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic_max); - let result = TributaryDb::accumulate::( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic_max, - validators[1], - validator_weight, - &random_bytes_32(&mut OsRng), - ); - // NOP'd: next_attempt_topic(u32::MAX) = attempt 0, which already has weight - assert!(matches!(result, DataSet::None)); - txn.commit(); + assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(3)); } - - // Attempt u32::MAX should have no accumulated data (it was NOP'd) - assert!(Accumulated::::get(&db, set, topic_max, validators[1]).is_none()); - // Weight for u32::MAX stays at initial recognized value (0) - assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(0)); } + /// Accumulating for a topic proceeds when the next attempt's topic has no + /// weight, regardless of whether an unrelated topic already has weight. #[test] - fn attempt_max_proceeds() { - let (set, _validator, validators, total_weight, validator_weight, topic_max) = setup(); - let topic_0 = topic_max.next_attempt_topic().unwrap(); + fn not_nopd_without_next_attempt_weight() { + for topic in all_preprocess_topics_and_attempts() { + let (set, _validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); - let mut db = MemDb::new(); + let mut db = MemDb::new(); - // First: accumulate for attempt u32::MAX (below threshold) - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic_max); - let result = TributaryDb::accumulate::( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic_max, - validators[0], - validator_weight, - &random_bytes_32(&mut OsRng), - ); - assert!(matches!(result, DataSet::None)); - txn.commit(); - } + // Accumulate for an unrelated topic so some weight exists in the DB + let unrelated = Topic::SlashReport; + { + let mut txn = db.txn(); + let result = TributaryDb::accumulate::( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + unrelated, + validators[0], + validator_weight, + &random_bytes_32(&mut OsRng), + ); + assert!(matches!(result, DataSet::None)); + txn.commit(); + } - assert_eq!(AccumulatedWeight::get(&db, set, topic_max), Some(validator_weight)); + assert_eq!(AccumulatedWeight::get(&db, set, unrelated), Some(validator_weight)); - let data = random_bytes_32(&mut OsRng); + // Accumulating for our topic proceeds (not NOP'd by unrelated weight) + let data = random_bytes_32(&mut OsRng); + { + let mut txn = db.txn(); + if topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, topic); + } + let result = TributaryDb::accumulate::( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + validators[1], + validator_weight, + &data, + ); + assert!(matches!(result, DataSet::None), "below threshold (1 of 3)"); + txn.commit(); + } - // Now accumulate for attempt 0 - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic_0); - let result = TributaryDb::accumulate::( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic_0, - validators[1], - validator_weight, - &data, - ); - // Proceeds: next_attempt_topic(0) = attempt 1, which has no weight - assert!(matches!(result, DataSet::None)); - txn.commit(); + // Data was stored (not NOP'd) + assert_eq!(Accumulated::::get(&db, set, topic, validators[1]), Some(data)); + assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(validator_weight)); } - - // Attempt 0 accumulated successfully - assert_eq!(Accumulated::::get(&db, set, topic_0, validators[1]), Some(data)); - assert_eq!(AccumulatedWeight::get(&db, set, topic_0), Some(validator_weight)); } } mod accumulate_reattempt_topic { use super::*; - /// Set up a random reattemptable topic with `attempt = u32::MAX`. - fn setup() -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16, Topic) { - let (set, validator, validators, total_weight, validator_weight) = - default_accumulate_setup(); - let topic = random_reattemptable_topic_at_max_attempt(); - (set, validator, validators, total_weight, validator_weight, topic) - } - #[test] - fn reattempt_wraps_to_zero_on_overflow() { - let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); - let mut db = MemDb::new(); - let block_number = 1_000_000u64; + fn data_preserved_or_cleaned_up_based_on_reattempt() { + for topic in all_preprocess_topics_and_attempts() { + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); + let mut db = MemDb::new(); + let block_number = 1_000_000u64; - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic); + { + let mut txn = db.txn(); + if topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, topic); + } - let result = accumulate_to_threshold( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - |i| [i as u8; 32], - None::, - ); - assert!(matches!(result, DataSet::Participating(_))); - txn.commit(); - } + let result = accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| [i as u8; 32], + None::, + ); + assert!(matches!(result, DataSet::Participating(_))); + txn.commit(); + } - // Overflow wraps attempt to 0, so blocks_till_reattempt = 0 * BASE_DELAY = 0. - // Reattempt is queued at block_number itself. - assert!(Reattempt::get(&db, set, block_number).is_some()); - // But not at any subsequent block - assert!(Reattempt::get(&db, set, block_number + 1).is_none()); + if topic.reattempt_topic().is_some() { + for (i, v) in validators.iter().enumerate() { + assert_eq!( + Accumulated::::get(&db, set, topic, *v), + Some([i as u8; 32]), + "data should be preserved when reattempt exists: {topic:?}" + ); + } + } else { + assert!( + Reattempt::get(&db, set, block_number).is_none(), + "no reattempt should be queued: {topic:?}" + ); + for v in &validators { + assert!( + Accumulated::::get(&db, set, topic, *v).is_none(), + "data should be cleaned up when no reattempt: {topic:?}" + ); + } + } + } } + /// Reattempt scheduling panics on overflow instead of silently scheduling + /// at an unreachable block. #[test] - fn data_preserved_when_overflow_wraps() { - let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); + fn reattempt_schedule_panics_on_overflow() { + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); + + // attempt just below u32::MAX so reattempt_topic() returns Some(u32::MAX) + let topic = + Topic::DkgConfirmation { attempt: u32::MAX - 1, round: SigningProtocolRound::Preprocess }; + assert_eq!(topic.reattempt_topic().unwrap().0, u32::MAX); + + // block_number near u64::MAX forces checked_add to overflow + let block_number = u64::MAX - 1; + let mut db = MemDb::new(); + let mut txn = db.txn(); + TributaryDb::recognize_topic(&mut txn, set, topic); - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic); + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { accumulate_to_threshold( &mut txn, set, &validators, total_weight, - random_block_number(&mut OsRng), + block_number, topic, |i| [i as u8; 32], None::, ); - txn.commit(); - } + })); - // reattempt_topic() wraps to attempt 0, so data is preserved for the reattempt - for (i, v) in validators.iter().enumerate() { - assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); - } + assert!(result.is_err(), "should panic on reattempt block number overflow"); } #[test] - fn data_preserved_with_normal_attempt() { - let set = default_test_validator_set(); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); - let total_weight = 3; + fn succeeding_topic_recognized_after_threshold() { + for topic in all_preprocess_topics_and_attempts() { + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); + let mut db = MemDb::new(); - // attempt = 0 so reattempt_topic() returns Some - let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; - assert!(topic.reattempt_topic().is_some()); + let succeeding = topic.succeeding_topic().unwrap(); - let mut db = MemDb::new(); + { + let mut txn = db.txn(); + if topic.requires_recognition() { + TributaryDb::recognize_topic(&mut txn, set, topic); + } + accumulate_to_threshold( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + |i| [i as u8; 32], + None::, + ); + txn.commit(); + } - { - let mut txn = db.txn(); - // attempt 0 Preprocess doesn't require recognition - accumulate_to_threshold( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic, - |i| [i as u8; 32], - None::, + assert_eq!( + AccumulatedWeight::get(&db, set, succeeding), + Some(0), + "succeeding topic should be recognized after threshold: {topic:?}" ); - txn.commit(); - } - - // reattempt_topic() is Some, so data is preserved for the reattempt - for (i, v) in validators.iter().enumerate() { - assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32]),); } } + } + + /// Tests the invariant documented at fn accumulate: + /// "This function will only be called once for a (validator, topic) tuple" + mod duplicate_accumulate { + use super::*; + /// Calling accumulate twice for the same (validator, topic) panics, + /// enforcing the invariant that the nonce system prevents duplicate calls. #[test] - fn succeeding_topic_recognized_with_overflow_wrap() { - let (set, _validator, validators, total_weight, _validator_weight, topic) = setup(); + #[should_panic = "accumulate called twice for the same (validator, topic) tuple"] + fn double_call_before_threshold_panics() { + let topic = Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }; + let (set, validator, validators, total_weight, validator_weight) = + default_accumulate_setup(); let mut db = MemDb::new(); + let mut txn = db.txn(); - let succeeding = topic.succeeding_topic().unwrap(); - assert_eq!( - succeeding, - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share } + // First call succeeds + TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + validator, + validator_weight, + &vec![1, 2, 3], ); - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic); - accumulate_to_threshold( - &mut txn, - set, - &validators, - total_weight, - random_block_number(&mut OsRng), - topic, - |i| [i as u8; 32], - None::, - ); - txn.commit(); - } - - // The succeeding topic is recognized - assert_eq!(AccumulatedWeight::get(&db, set, succeeding), Some(0)); + // Second call with same (validator, topic) should panic + TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + random_block_number(&mut OsRng), + topic, + validator, + validator_weight, + &vec![4, 5, 6], + ); } + /// After threshold with a reattempt topic, Accumulated entries are + /// preserved (for the reattempt protocol), so the duplicate assert fires. #[test] - fn sign_topic_reattempt_wraps_on_overflow() { - let set = default_test_validator_set(); - let validators: Vec = - (0 .. 3).map(|_| random_serai_address(&mut OsRng)).collect(); - let total_weight = 3; - let block_number = 500_000u64; - - let topic = Topic::Sign { - id: VariantSignId::Cosign(42), - attempt: u32::MAX, - round: SigningProtocolRound::Preprocess, - }; - // Overflow wraps to attempt 0 - assert_eq!( - topic.reattempt_topic(), - Some(( - 0, - Topic::Sign { - id: VariantSignId::Cosign(42), - attempt: 0, - round: SigningProtocolRound::Preprocess, - } - )) + #[should_panic = "accumulate called twice for the same (validator, topic) tuple"] + fn double_call_after_threshold_with_reattempt_panics() { + // DkgConfirmation Preprocess has a reattempt topic, so entries survive post-threshold + let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); + let mut db = MemDb::new(); + let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); + + TributaryDb::recognize_topic(&mut txn, set, topic); + + accumulate_to_threshold::, _, _>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| vec![i as u8], + None::>)>, ); + // Entries preserved for reattempt, duplicate panics + TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validators[0], + 1, + &vec![99], + ); + } + + /// After threshold without a reattempt topic, Accumulated entries are + /// cleaned up. The duplicate call does not hit the assertion (key is gone) + /// and instead falls through to the weight >= threshold NOP. + #[test] + fn double_call_after_threshold_without_reattempt_is_nop() { + // RemoveParticipant has no reattempt, so entries are cleaned up post-threshold + let topic = Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }; + let (set, _validator, validators, total_weight, _validator_weight) = + default_accumulate_setup(); let mut db = MemDb::new(); + let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); - { - let mut txn = db.txn(); - TributaryDb::recognize_topic(&mut txn, set, topic); - accumulate_to_threshold( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - |i| [i as u8; 32], - None::, - ); - txn.commit(); - } + accumulate_to_threshold::, _, _>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + |i| vec![i as u8], + None::>)>, + ); - // Reattempt queued at block_number (delay = 0 for attempt 0) - assert!(Reattempt::get(&db, set, block_number).is_some()); - for offset in 1 ..= 2000 { - assert!(Reattempt::get(&db, set, block_number + offset).is_none()); - } + let weight_after_threshold = AccumulatedWeight::get(&txn, set, topic).unwrap(); - // Data preserved for reattempt - for (i, v) in validators.iter().enumerate() { - assert_eq!(Accumulated::::get(&db, set, topic, *v), Some([i as u8; 32])); - } + // Entry was cleaned up, so assertion doesn't fire. + // Falls through to the `accumulated_weight >= required_participation` NOP. + let result = TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validators[0], + 1, + &vec![99], + ); - // Succeeding topic (Share) still recognized - let succeeding = topic.succeeding_topic().unwrap(); - assert_eq!(AccumulatedWeight::get(&db, set, succeeding), Some(0)); + assert!(matches!(result, DataSet::None), "should be NOP after threshold"); + assert_eq!( + AccumulatedWeight::get(&txn, set, topic).unwrap(), + weight_after_threshold, + "weight should not change" + ); } } mod fuzz { - use proptest::prelude::*; use super::*; /// Verify all DB invariants after a single `TributaryDb::accumulate` call. @@ -1084,7 +1040,7 @@ mod tributary_db { validator_in_list: bool, result: &DataSet>, ) { - let required = crate::db::required_participation(total_weight); + let required = required_participation(total_weight); let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); let post_weight = AccumulatedWeight::get(db, set, topic); @@ -1110,20 +1066,7 @@ mod tributary_db { return; } - // Branch 3: required_participation overflows. - let Ok(required) = required else { - assert!(matches!(result, DataSet::None)); - assert_eq!( - post_weight, pre_weight, - "weight unchanged when required_participation overflows" - ); - if !pre_slashed { - assert!(!post_slashed, "should not be slashed on overflow NOP"); - } - return; - }; - - // Branch 4: Already accumulated past the threshold - NOP. + // Branch 3: Already accumulated past the threshold - NOP. if weight_before >= required { assert!(matches!(result, DataSet::None)); assert_eq!(post_weight, pre_weight, "weight unchanged when past threshold"); @@ -1158,15 +1101,11 @@ mod tributary_db { // 7a: Reattempt should be queued if topic is reattemptable. if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { - #[cfg(not(feature = "longer-reattempts"))] - const BASE_REATTEMPT_DELAY: u32 = - (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - #[cfg(feature = "longer-reattempts")] - const BASE_REATTEMPT_DELAY: u32 = - (10u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); - - let blocks_till = u64::from(reattempt_attempt * BASE_REATTEMPT_DELAY); - let recognize_at = block_number + blocks_till; + let blocks_till = u64::from(reattempt_attempt) + .checked_mul(u64::from(BASE_REATTEMPT_DELAY)) + .expect("reattempt delay overflowed u64"); + let recognize_at = + block_number.checked_add(blocks_till).expect("reattempt block number overflowed u64"); let queued = Reattempt::get(db, set, recognize_at); assert!(queued.is_some(), "reattempt should be queued at block {recognize_at}"); @@ -1257,120 +1196,130 @@ mod tributary_db { } } - proptest! { - #![proptest_config(ProptestConfig::with_cases(1000))] - - #[test] - fn fuzz_accumulate( - has_initial_weight in any::(), - initial_weight in 0u16..u16::MAX, - total_weight in 1u16..u16::MAX, - - has_next_topic_weight in any::(), - next_topic_initial_weight in 0u16..u16::MAX, - - has_preceding_topic_accumulated in any::(), - - topic_variant in 0u8..5, - attempt in 0u32..100, - round in 0u8..2, - cosign_block in any::(), - batch_id in any::<[u8; 32]>(), - validator_weight in 1u16..u16::MAX, - block_number in 1u64..u64::MAX, - data in prop::collection::vec(any::(), 0..64), - - num_validators in 1u16..u16::MAX, - cur_validator in 0u16..u16::MAX, - validator_in_list in any::(), - ) { - let round = - if round == 0 { SigningProtocolRound::Preprocess } else { SigningProtocolRound::Share }; - - let topic = match topic_variant % 5 { - 0 => Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, - 1 => Topic::DkgConfirmation { attempt: attempt % 100, round }, - 2 => Topic::SlashReport, - 3 => Topic::Sign { - id: VariantSignId::Cosign(cosign_block), - attempt: attempt % 100, - round, - }, - _ => { - Topic::Sign { id: VariantSignId::Batch(batch_id), attempt: attempt % 100, round } - } - }; + #[test] + fn fuzz_accumulate() { + for _ in 0 .. 1000 { + let has_initial_weight = OsRng.gen::(); + let initial_weight = OsRng.gen_range(0u16 .. u16::MAX); + let total_weight = OsRng.gen_range(1u16 .. u16::MAX); - let mut db = MemDb::new(); - let set = default_test_validator_set(); + let has_next_topic_weight = OsRng.gen::(); + let next_topic_initial_weight = OsRng.gen_range(0u16 .. u16::MAX); - let validators: Vec = - (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); + let has_preceding_topic_accumulated = OsRng.gen::(); - let validator_weight = validator_weight.min(total_weight).max(1); + let topic_variant = OsRng.gen_range(0u8 .. 5); + let attempt = OsRng.gen_range(0u32 .. 100); + let round = if OsRng.gen::() { + SigningProtocolRound::Preprocess + } else { + SigningProtocolRound::Share + }; + let cosign_block = OsRng.next_u64(); + let batch_id: [u8; 32] = OsRng.gen(); + let validator_weight = OsRng.gen_range(1u16 .. u16::MAX); + let block_number = OsRng.gen_range(1u64 .. u64::MAX); + let data: Vec = (0 .. OsRng.gen_range(0usize .. 64)).map(|_| OsRng.gen()).collect(); + + let num_validators = OsRng.gen_range(1u16 .. u16::MAX); + let cur_validator = OsRng.gen_range(0u16 .. u16::MAX); + let validator_in_list = OsRng.gen::(); + + let topic = match topic_variant % 5 { + 0 => Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + 1 => Topic::DkgConfirmation { attempt: attempt % 100, round }, + 2 => Topic::SlashReport, + 3 => { + Topic::Sign { id: VariantSignId::Cosign(cosign_block), attempt: attempt % 100, round } + } + _ => Topic::Sign { id: VariantSignId::Batch(batch_id), attempt: attempt % 100, round }, + }; - let mut txn = db.txn(); + let mut db = MemDb::new(); + let set = default_test_validator_set(); - if has_initial_weight { - AccumulatedWeight::set(&mut txn, set, topic, &initial_weight); - } + let validators: Vec = + (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); - if has_next_topic_weight { - if let Some(next_attempt_topic) = topic.next_attempt_topic() { - AccumulatedWeight::set(&mut txn, set, next_attempt_topic, &next_topic_initial_weight); - } - } + let validator_weight = validator_weight.min(total_weight).max(1); - // When validator_in_list is false, the accumulating validator is an outsider - // not present in the validators slice. This exercises the `participated = false` - // branch when the threshold is crossed. - let cur_validator = (cur_validator as usize) % validators.len(); - let validator = if validator_in_list { - validators[cur_validator] - } else { - random_serai_address(&mut OsRng) - }; - - if has_preceding_topic_accumulated { - if let Some(preceding_topic) = topic.preceding_topic() { - Accumulated::set(&mut txn, set, preceding_topic, validator, &data) - } - } + let db_clone = db.clone(); + let mut txn = db.txn(); - let pre_weight = AccumulatedWeight::get(&txn, set, topic); - let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); - - let result = TributaryDb::accumulate::>( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - ); + if has_initial_weight { + AccumulatedWeight::set(&mut txn, set, topic, &initial_weight); + } - txn.commit(); - - verify_accumulate_invariants( - &db, - set, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - pre_weight, - pre_slashed, - has_preceding_topic_accumulated, - has_next_topic_weight, - validator_in_list, - &result, - ); + if has_next_topic_weight { + if let Some(next_attempt_topic) = topic.next_attempt_topic() { + AccumulatedWeight::set(&mut txn, set, next_attempt_topic, &next_topic_initial_weight); + } } + + // When validator_in_list is false, the accumulating validator is an outsider + // not present in the validators slice. This exercises the `participated = false` + // branch when the threshold is crossed. + let cur_validator = (cur_validator as usize) % validators.len(); + let validator = if validator_in_list { + validators[cur_validator] + } else { + random_serai_address(&mut OsRng) + }; + + if has_preceding_topic_accumulated { + if let Some(preceding_topic) = topic.preceding_topic() { + Accumulated::set(&mut txn, set, preceding_topic, validator, &data) + } + } + + let pre_weight = AccumulatedWeight::get(&txn, set, topic); + let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); + + let catch_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let result = TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + ); + + txn.commit(); + + verify_accumulate_invariants( + &db_clone, + set, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + pre_weight, + pre_slashed, + has_preceding_topic_accumulated, + has_next_topic_weight, + validator_in_list, + &result, + ); + })); + + if let Err(panic) = catch_result { + let msg = panic + .downcast_ref::() + .map(|s| s.as_str()) + .or_else(|| panic.downcast_ref::<&str>().copied()) + .unwrap_or(""); + if msg.contains("overflowed") { + continue; + } + std::panic::resume_unwind(panic); + } + } } } } diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 3f30ec39d..93344ab3c 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,18 +1,30 @@ +use std::collections::HashMap; + use ciphersuite::group::GroupEncoding; use ciphersuite::WrappedGroup; use dalek_ff_group::{Ristretto, RistrettoPoint}; use messages::sign::VariantSignId; -use rand::{CryptoRng, RngCore}; +use rand::{CryptoRng, Rng, RngCore}; use rand_core::OsRng; use serai_primitives::{ address::SeraiAddress, - test_helpers::{random_bytes_32, default_test_validator_set}, + test_helpers::{ + random_bytes_32, random_bytes_64, random_serai_address, random_vec_u8, + default_test_validator_set, + }, }; use tributary_sdk::P2p; use zeroize::Zeroizing; +use dkg::Participant; +use serai_coordinator_substrate::NewSetInformation; + +use crate::{ + db::{ActivelyCosigning, TributaryDb}, + transaction::{Signed, SigningProtocolRound, Transaction}, +}; pub mod transaction; pub mod db; @@ -45,6 +57,132 @@ pub(crate) fn random_serai_address_and_key( (key, SeraiAddress(key.to_bytes())) } +use crate::db::Topic; + +pub(crate) fn random_signed(rng: &mut R) -> Signed { + let signed = tributary_sdk::tests::random_signed(&mut *rng); + Signed { signer: signed.signer, signature: signed.signature } +} + +/// One of each signed transaction kind, using the provided `Signed` value. +pub(crate) fn all_signed_transactions_with(signed: Signed) -> Vec { + vec![ + Transaction::RemoveParticipant { participant: random_serai_address(&mut OsRng), signed }, + Transaction::DkgParticipation { participation: random_vec_u8(&mut OsRng), signed }, + Transaction::DkgConfirmationPreprocess { + attempt: 0, + preprocess: random_bytes_64(&mut OsRng), + signed, + }, + Transaction::DkgConfirmationShare { attempt: 0, share: random_bytes_32(&mut OsRng), signed }, + Transaction::Sign { + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![random_vec_u8(&mut OsRng)], + signed, + }, + Transaction::SlashReport { slash_points: (0 .. 3).map(|_| OsRng.next_u32()).collect(), signed }, + ] +} + pub(crate) fn random_transaction_id() -> VariantSignId { VariantSignId::Transaction(random_bytes_32(&mut OsRng)) } + +/// The expected topic to be recognized after start_cosigning runs. +pub(crate) fn expected_topic_after_start_cosigning(id: VariantSignId) -> Topic { + Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess } +} + +/// Assert the DB invariants established by `TributaryDb::start_cosigning`: +/// - `ActivelyCosigning` is set to the given block hash. +/// - The cosign topic is recognized (AccumulatedWeight initialized). +/// - The cosign topic was queued for recognition (RecognizedTopics). +pub(crate) fn assert_cosigning_invariants( + txn: &mut impl serai_db::DbTxn, + set: serai_primitives::validator_sets::ExternalValidatorSet, + block_hash: serai_primitives::BlockHash, + block_number: u64, +) { + let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number)); + + assert_eq!( + ActivelyCosigning::get(txn, set), + Some(block_hash), + "ActivelyCosigning should be set to the block hash after start_cosigning" + ); + assert!( + TributaryDb::recognized(txn, set, expected_topic), + "cosign topic should be recognized after start_cosigning" + ); + assert_eq!( + TributaryDb::try_recv_topic_requiring_recognition(txn, set), + Some(expected_topic), + "cosign topic should be queued for recognition after start_cosigning" + ); +} + +pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { + let mut participant_indexes = HashMap::new(); + let mut reverse_lookup = HashMap::new(); + let mut i = 1u16; + for (address, weight) in validators { + let mut indices = Vec::new(); + for _ in 0 .. *weight { + let p = Participant::new(i).unwrap(); + indices.push(p); + reverse_lookup.insert(p, *address); + i += 1; + } + participant_indexes.insert(*address, indices); + } + + NewSetInformation { + set: default_test_validator_set(), + serai_block: random_bytes_32(&mut OsRng), + declaration_time: OsRng.next_u64(), + threshold: OsRng.gen_range(0 ..= u16::MAX), + validators: validators.to_vec(), + evrf_public_keys: vec![], + participant_indexes, + participant_indexes_reverse_lookup: reverse_lookup, + } +} + +/// Common test setup: 3 random validators each with weight 1, total_weight = 3. +pub(crate) fn setup_test_validators_and_weights( +) -> (Vec<(SeraiAddress, u16)>, Vec, HashMap, u16) { + let validator_data = vec![ + (random_serai_address(&mut OsRng), 1u16), + (random_serai_address(&mut OsRng), 1), + (random_serai_address(&mut OsRng), 1), + ]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + + let mut weights = HashMap::new(); + for (address, weight) in &validator_data { + weights.insert(*address, *weight); + } + + (validator_data, validators, weights, 3) +} + +/// Like `setup_test_validators_and_weights`, but each validator also has a real key +/// so tests can produce valid `Signed` values via `new_signed`. +pub(crate) fn setup_test_validators_and_weights_with_keys() -> ( + Vec<(RistrettoPoint, SeraiAddress)>, + Vec<(SeraiAddress, u16)>, + Vec, + HashMap, + u16, +) { + let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = + (0 .. 3).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); + let validator_data: Vec<(SeraiAddress, u16)> = + keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + + (keys_addrs, validator_data, validators, weights, 3) +} diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index c9a29c00b..3fc9d2b51 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -1,11 +1,14 @@ use core::marker::PhantomData; use std::collections::HashMap; -use rand::{Rng, RngCore}; +use rand::RngCore; use rand_core::OsRng; -use serai_substrate_tests::{random_block_hash, random_serai_address}; +use serai_primitives::test_helpers::{ + random_block_hash, random_block_number, random_bytes_32, random_serai_address, random_vec_of_len, +}; -use ciphersuite::group::GroupEncoding; -use dalek_ff_group::RistrettoPoint; +use ciphersuite::{group::GroupEncoding, WrappedGroup}; +use dalek_ff_group::{Ristretto, RistrettoPoint}; +use schnorr::SchnorrSignature; use serai_primitives::address::SeraiAddress; @@ -18,71 +21,25 @@ use serai_db::{Db, DbTxn, MemDb}; use serai_cosign_types::CosignIntent; use serai_coordinator_substrate::NewSetInformation; -use tributary_sdk::{Block, BlockHeader, Transaction as TributaryTransaction, P2p}; +use tributary_sdk::{ + Block, BlockHeader, Transaction as TributaryTransaction, Evidence, tendermint::tx::TendermintTx, +}; use crate::{ + CosignIntents, DkgConfirmationMessages, ProcessorMessages, ScanBlock, SubstrateBlockPlans, db::{ AccumulatedWeight, ActivelyCosigning, CosignIntents as DbCosignIntents, LatestSubstrateBlockToCosign, Topic, TributaryDb, }, - tests::{default_test_validator_set, random_serai_address_and_key}, + transaction::{SigningProtocolRound, Signed, Transaction}, + tests::{ + all_signed_transactions_with, assert_cosigning_invariants, MockP2p, default_test_validator_set, + expected_topic_after_start_cosigning, setup_test_validators_and_weights, + setup_test_validators_and_weights_with_keys, random_serai_address_and_key, new_test_set_info, + }, }; -use crate::transaction::{SigningProtocolRound, Signed, Transaction}; -use crate::{CosignIntents, DkgConfirmationMessages, ProcessorMessages, ScanBlock, SubstrateBlockPlans}; - -#[derive(Clone)] -struct MockP2p; -impl P2p for MockP2p { - fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + core::future::Future { - async move { unimplemented!() } - } -} - -fn get_test_validators_and_weights_setup( -) -> (Vec<(SeraiAddress, u16)>, Vec, HashMap, u16) { - let validator_data = vec![ - (random_serai_address(&mut OsRng), 1u16), - (random_serai_address(&mut OsRng), 1), - (random_serai_address(&mut OsRng), 1), - ]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - - let mut weights = HashMap::new(); - for (address, weight) in &validator_data { - weights.insert(*address, *weight); - } - - (validator_data, validators, weights, 3) -} - -fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { - let mut participant_indexes = HashMap::new(); - let mut reverse_lookup = HashMap::new(); - let mut i = 1u16; - for (address, weight) in validators { - let mut indices = Vec::new(); - for _ in 0 .. *weight { - let p = Participant::new(i).unwrap(); - indices.push(p); - reverse_lookup.insert(p, *address); - i += 1; - } - participant_indexes.insert(*address, indices); - } - - NewSetInformation { - set: default_test_validator_set(), - serai_block: random_block_hash(&mut OsRng).0, - declaration_time: OsRng.next_u64(), - threshold: OsRng.gen_range(0 ..= u16::MAX), - validators: validators.to_vec(), - evrf_public_keys: vec![], - participant_indexes, - participant_indexes_reverse_lookup: reverse_lookup, - } -} -fn make_scan_block<'a, TDT: DbTxn>( +fn new_scan_block<'a, TDT: DbTxn>( txn: &'a mut TDT, set_info: &'a NewSetInformation, validators: &'a [SeraiAddress], @@ -100,120 +57,113 @@ fn make_scan_block<'a, TDT: DbTxn>( } } -/// Create a Signed with the given signer key and a dummy signature. -fn make_signed(signer: RistrettoPoint) -> Signed { - Signed { signer, ..Signed::default() } +/// Create a Signed with the given signer key and a random signature. +fn new_signed(signer: RistrettoPoint) -> Signed { + Signed { + signer, + signature: SchnorrSignature { + R: Ristretto::generator() * ::F::random(&mut OsRng), + s: ::F::random(&mut OsRng), + }, + } } -mod potentially_start_cosign { - use super::*; +#[test] +fn potentially_start_cosign() { + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let set_info = new_test_set_info(&validator_data); + let set = set_info.set; - #[test] - fn potentially_start_cosign() { - let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); - let set_info = new_test_set_info(&validator_data); + // Already actively cosigning: should not replace the actively cosigning block + { + let mut db = MemDb::new(); + let initial_block_hash = random_block_hash(&mut OsRng); - // No TributaryDb::latest_substrate_block_to_cosign block: no-op { - let mut db = MemDb::new(); let mut txn = db.txn(); - { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.potentially_start_cosign(); - } - assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + TributaryDb::start_cosigning(&mut txn, set, initial_block_hash, OsRng.next_u64()); + let new_block_hash = random_block_hash(&mut OsRng); + LatestSubstrateBlockToCosign::set(&mut txn, set, &new_block_hash); + txn.commit(); } - // Already cosigning: should not replace the actively cosigning block + let mut txn = db.txn(); { - let mut db = MemDb::new(); - let initial_block_hash = random_block_hash(&mut OsRng); - - { - let mut txn = db.txn(); - TributaryDb::start_cosigning(&mut txn, set, initial_block_hash, OsRng.next_u64()); - let new_block_hash = random_block_hash(&mut OsRng); - LatestSubstrateBlockToCosign::set(&mut txn, set, &new_block_hash); - txn.commit(); - } - - let mut txn = db.txn(); - { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.potentially_start_cosign(); - } - - // Did not replace initial_block_hash for new_block_hash - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(initial_block_hash)); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); } - // Already cosigned: no-op + // Did not replace initial_block_hash for new_block_hash + assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(initial_block_hash)); + } + + // No TributaryDb::latest_substrate_block_to_cosign block: no-op + { + let mut db = MemDb::new(); + let mut txn = db.txn(); { - let mut db = MemDb::new(); - let initial_block_hash = random_block_hash(&mut OsRng); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } + assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + } - { - let mut txn = db.txn(); - LatestSubstrateBlockToCosign::set(&mut txn, set, &initial_block_hash); - TributaryDb::mark_cosigned(&mut txn, set, initial_block_hash); - txn.commit(); - } + // Already cosigned: no-op + { + let mut db = MemDb::new(); + let initial_block_hash = random_block_hash(&mut OsRng); + { let mut txn = db.txn(); - { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.potentially_start_cosign(); - } - - assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + LatestSubstrateBlockToCosign::set(&mut txn, set, &initial_block_hash); + TributaryDb::mark_cosigned(&mut txn, set, initial_block_hash); + txn.commit(); } - // Ready to cosign: starts cosigning and sends processor message + let mut txn = db.txn(); { - let mut db = MemDb::new(); - let block_hash = random_block_hash(&mut OsRng); - let mut global_session = [0; 32]; - OsRng.fill_bytes(global_session.as_mut()); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); + } - let intent = - CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; + assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + } - { - let mut txn = db.txn(); - LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); - CosignIntents::provide(&mut txn, set, &intent); - txn.commit(); - } + // Ready to cosign: starts cosigning and sends processor message + { + let mut db = MemDb::new(); + let block_hash = random_block_hash(&mut OsRng); + let global_session = random_bytes_32(&mut OsRng); + + let intent = CosignIntent { + global_session, + block_number: random_block_number(&mut OsRng), + block_hash, + notable: false, + }; + { let mut txn = db.txn(); - { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.potentially_start_cosign(); - } + LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); + CosignIntents::provide(&mut txn, set, &intent); + txn.commit(); + } - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); - assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + let mut txn = db.txn(); + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.potentially_start_cosign(); } - } - #[test] - #[should_panic(expected = "provided CosignIntent wasn't saved by its block hash")] - fn potentially_start_cosign_panics_on_differing_intent_blockhash() { - let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); - let set_info = new_test_set_info(&validator_data); + assert_cosigning_invariants(&mut txn, set, block_hash, intent.block_number); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + // Panics when stored intent's block_hash differs from latest_substrate_block_to_cosign + { let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); - let mut global_session = [0; 32]; - OsRng.fill_bytes(global_session.as_mut()); + let global_session = random_bytes_32(&mut OsRng); { let mut txn = db.txn(); @@ -227,118 +177,108 @@ mod potentially_start_cosign { block_hash, &CosignIntent { global_session, - block_number: OsRng.next_u64(), + block_number: random_block_number(&mut OsRng), // but the intent's block_hash field is a new_block_hash - block_hash: new_block_hash, // triggering the assert_eq!(intent.block_hash, latest_substrate_block_to_cosign) panic + block_hash: new_block_hash, notable: false, }, ); txn.commit(); } - { + let result = std::panic::catch_unwind(move || { let mut txn = db.txn(); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.potentially_start_cosign(); - } + }); + let err = result.expect_err("should panic on differing intent block hash"); + let msg = err.downcast_ref::().expect("panic payload should be a String"); + assert!( + msg.contains("provided CosignIntent wasn't saved by its block hash"), + "unexpected panic message: {msg}" + ); } } #[test] fn accumulate_dkg_confirmation() { - // Use 3 validators with weight 1 each so threshold math is deterministic: - // total_weight = 3, required_participation = 3 * 2 = 6 / 3 = 2 + 1 = 3 - let v0 = random_serai_address(&mut OsRng); - let v1 = random_serai_address(&mut OsRng); - let v2 = random_serai_address(&mut OsRng); - let validator_data = vec![(v0, 1u16), (v1, 1), (v2, 1)]; - let validators = vec![v0, v1, v2]; - let weights: HashMap = validator_data.iter().copied().collect(); - let total_weight = 3u16; + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (v1, v2, v3) = (validators[0], validators[1], validators[2]); let set_info = new_test_set_info(&validator_data); let set = set_info.set; let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; - // Below threshold: returns None until enough weight accumulates + // Panics if the topic isn't DkgConfirmation { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - let block_number = OsRng.next_u64(); - - let mut data0 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data0); - - assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data0, v0).is_none()); - - let mut data1 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data1); - - assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data1, v1).is_none()); + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.accumulate_dkg_confirmation( + random_block_number(&mut OsRng), + Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, + &random_vec_of_len(&mut OsRng, 4), + validators[0], + ); + })); - txn.commit(); + assert!(result.is_err(), "should panic when called with a non-DkgConfirmation topic"); } // Threshold crossed: third accumulation returns SignId + correctly mapped data { let mut db = MemDb::new(); let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); - let mut data0 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data0); - let mut data1 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data1); - let mut data2 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data2); - - let result; { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let data1 = random_vec_of_len(&mut OsRng, 4); + let data2 = random_vec_of_len(&mut OsRng, 4); + let data3 = random_vec_of_len(&mut OsRng, 4); - assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data0, v0).is_none()); - assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data1, v1).is_none()); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - result = scan_block.accumulate_dkg_confirmation(1, topic, &data2, v2); - } - let (sign_id, data_set) = result.unwrap(); - - // SignId must match what dkg_confirmation_sign_id produces - assert_eq!(sign_id, topic.dkg_confirmation_sign_id(set).unwrap()); + assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data1, v1).is_none()); + assert!(scan_block.accumulate_dkg_confirmation(block_number, topic, &data2, v2).is_none()); + let result = scan_block.accumulate_dkg_confirmation(block_number, topic, &data3, v3); - // Participants are 1-indexed by list position, not by weight-based indices - assert_eq!(data_set.len(), 3); - assert_eq!(data_set[&Participant::new(1).unwrap()], data0); - assert_eq!(data_set[&Participant::new(2).unwrap()], data1); - assert_eq!(data_set[&Participant::new(3).unwrap()], data2); - } + let (sign_id, data_set) = result.expect("third accumulation should cross threshold"); - // Past threshold: further accumulations are no-ops - { - let mut db = MemDb::new(); - let mut txn = db.txn(); + assert_eq!( + sign_id, + topic.dkg_confirmation_sign_id(set).unwrap(), + "SignId must match what dkg_confirmation_sign_id produces" + ); - let mut data0 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data0); - let mut data1 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data1); - let mut data2 = vec![0u8; 4]; - OsRng.fill_bytes(&mut data2); - let mut data_extra = vec![0u8; 4]; - OsRng.fill_bytes(&mut data_extra); + // Participants are 1-indexed by list position, not by weight-based indices + assert_eq!(data_set.len(), 3); + assert_eq!(data_set[&Participant::new(1).unwrap()], data1); + assert_eq!(data_set[&Participant::new(2).unwrap()], data2); + assert_eq!(data_set[&Participant::new(3).unwrap()], data3); + } + // Past threshold: further accumulations from a new validator are no-ops { - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.accumulate_dkg_confirmation(1, topic, &data0, v0); - scan_block.accumulate_dkg_confirmation(1, topic, &data1, v1); - scan_block.accumulate_dkg_confirmation(1, topic, &data2, v2); - - // Already past threshold - this returns None - assert!(scan_block.accumulate_dkg_confirmation(1, topic, &data_extra, v0).is_none()); + // Add a 4th validator so we have a fresh signer after threshold is crossed. + // total_weight=4, required_participation = 3, so v0+v1+v2 cross threshold. + let v4 = random_serai_address(&mut OsRng); + let mut validator_data_4 = validator_data.clone(); + validator_data_4.push((v4, 1)); + let validators_4: Vec = validator_data_4.iter().map(|(a, _)| *a).collect(); + let mut weights_4 = weights.clone(); + weights_4.insert(v4, 1); + let set_info_4 = new_test_set_info(&validator_data_4); + + let data4 = random_vec_of_len(&mut OsRng, 4); + + { + let mut scan_block = new_scan_block(&mut txn, &set_info_4, &validators_4, 4, &weights_4); + assert!( + scan_block.accumulate_dkg_confirmation(block_number, topic, &data4, v4).is_none(), + "accumulation after threshold should be a NOP" + ); + } } } } @@ -347,38 +287,37 @@ mod handle_application_tx { use super::*; #[test] - fn dont_handle_from_fatally_slashed() { + fn dont_handle_signed_kind_from_fatally_slashed() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); let mut db = MemDb::new(); - // Don't handle transactions from those fatally slashed. { let mut txn = db.txn(); TributaryDb::fatal_slash(&mut txn, set, default_signer, "test reason"); txn.commit(); } - let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for tx in all_signed_transactions_with(Signed::default()) { + let mut txn = db.txn(); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - OsRng.next_u64(), - Transaction::DkgParticipation { participation: vec![1, 2, 3], signed: Signed::default() }, - ); + scan_block.handle_application_tx(random_block_number(&mut OsRng), tx.clone()); - assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + assert!( + ProcessorMessages::try_recv(&mut txn, set).is_none(), + "fatally slashed signer should be ignored for {tx:?}" + ); + } } #[test] - fn handle_remove_participant_tx_type() { + fn remove_participant() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); @@ -386,69 +325,21 @@ mod handle_application_tx { { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); let nonexistent = random_serai_address(&mut OsRng); scan_block.handle_application_tx( - OsRng.next_u64(), + random_block_number(&mut OsRng), Transaction::RemoveParticipant { participant: nonexistent, signed: Signed::default() }, ); assert!(TributaryDb::is_fatally_slashed(&mut txn, set, default_signer)); } - // Valid RemoveParticipant with a signer who IS a validator accumulates weight - { - // Fresh db so the signer isn't fatally slashed from the sub-test above - let mut db = MemDb::new(); - let mut txn = db.txn(); - - // Generate a signer that's actually in the validator set - let (signer_key, signer_addr) = random_serai_address_and_key(&mut OsRng); - let signer_weight = 1u16; - - let mut extended_validator_data = validator_data.clone(); - extended_validator_data.push((signer_addr, signer_weight)); - let extended_validators: Vec = - extended_validator_data.iter().map(|(a, _)| *a).collect(); - let mut extended_weights = weights.clone(); - extended_weights.insert(signer_addr, signer_weight); - let extended_total_weight = total_weight + signer_weight; - let extended_set_info = new_test_set_info(&extended_validator_data); - - let mut scan_block = make_scan_block( - &mut txn, - &extended_set_info, - &extended_validators, - extended_total_weight, - &extended_weights, - ); - - // Target one of the original validators (not the signer) - let target = validators[OsRng.gen_range(0 ..= validators.len() - 1)]; - - scan_block.handle_application_tx( - OsRng.next_u64(), - Transaction::RemoveParticipant { participant: target, signed: make_signed(signer_key) }, - ); - - assert!(AccumulatedWeight::get( - &mut txn, - set, - Topic::RemoveParticipant { participant: target } - ) - .is_some()); - } - - // When enough validators vote to remove a participant, the threshold is crossed - // and the participant is fatally slashed (DataSet::Participating branch) + // Valid RemoveParticipant accumulates weight and eventually crosses threshold { - let mut db = MemDb::new(); - let mut txn = db.txn(); - - // All 3 validators need real keys so they can sign + // All validators have real keys so they can sign let (key0, addr0) = random_serai_address_and_key(&mut OsRng); let (key1, addr1) = random_serai_address_and_key(&mut OsRng); let (key2, addr2) = random_serai_address_and_key(&mut OsRng); @@ -459,88 +350,93 @@ mod handle_application_tx { let set_info = new_test_set_info(&validator_data); let target = addr0; - let block_number = OsRng.next_u64(); + let block_number = random_block_number(&mut OsRng); + + let mut db = MemDb::new(); + let mut txn = db.txn(); - // First two votes accumulate but don't cross the threshold + // First vote: topic is recognized, target not yet slashed { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 3, &weights); scan_block.handle_application_tx( block_number, - Transaction::RemoveParticipant { participant: target, signed: make_signed(key1) }, - ); - scan_block.handle_application_tx( - block_number, - Transaction::RemoveParticipant { participant: target, signed: make_signed(key2) }, + Transaction::RemoveParticipant { participant: target, signed: new_signed(key0) }, ); } - assert!(!TributaryDb::is_fatally_slashed(&mut txn, set, target)); + assert!( + TributaryDb::recognized(&mut txn, set, Topic::RemoveParticipant { participant: target }), + "RemoveParticipant topic should be recognized after handling the tx" + ); + assert!( + !TributaryDb::is_fatally_slashed(&mut txn, set, target), + "target should not be fatally slashed after one vote" + ); - // Third vote crosses the threshold — target gets fatally slashed + // Threshold crossed, target gets fatally slashed { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 3, &weights); scan_block.handle_application_tx( block_number, - Transaction::RemoveParticipant { participant: target, signed: make_signed(key0) }, + Transaction::RemoveParticipant { participant: target, signed: new_signed(key1) }, + ); + scan_block.handle_application_tx( + block_number, + Transaction::RemoveParticipant { participant: target, signed: new_signed(key2) }, ); } - assert!(TributaryDb::is_fatally_slashed(&mut txn, set, target)); + assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, target), + "target should be fatally slashed after threshold is crossed" + ); } } #[test] - fn handle_dkg_participation_tx_type() { + fn dkg_participation() { let mut db = MemDb::new(); - let set = default_test_validator_set(); - // Use a real validator key so the signer exists in participant_indexes - let (signer_key, signer_addr) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![ - (signer_addr, 1u16), - (random_serai_address(&mut OsRng), 1), - (random_serai_address(&mut OsRng), 1), - ]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (signer_key, _) = keys_addrs[0]; let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); - scan_block.handle_application_tx( - OsRng.next_u64(), - Transaction::DkgParticipation { - participation: vec![1, 2, 3], - signed: make_signed(signer_key), - }, - ); + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::DkgParticipation { + participation: vec![1, 2, 3], + signed: new_signed(signer_key), + }, + ); + } assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } #[test] - fn handle_dkg_confirmation_preprocess_tx_type() { + fn dkg_confirmation_preprocess() { let set = default_test_validator_set(); - - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); // Below threshold: no DkgConfirmationMessages sent { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - OsRng.next_u64(), + random_block_number(&mut OsRng), Transaction::DkgConfirmationPreprocess { attempt: OsRng.next_u32(), preprocess: [1u8; 64], - signed: make_signed(key0), + signed: new_signed(key0), }, ); @@ -552,14 +448,15 @@ mod handle_application_tx { let mut db = MemDb::new(); let mut txn = db.txn(); { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { scan_block.handle_application_tx( 1, Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess, - signed: make_signed(key), + signed: new_signed(key), }, ); } @@ -570,30 +467,26 @@ mod handle_application_tx { } #[test] - fn handle_dkg_confirmation_share_tx_type() { + fn dkg_confirmation_share() { let set = default_test_validator_set(); - - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (_, addr1) = random_serai_address_and_key(&mut OsRng); - let (_, addr2) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (key0, addr0) = keys_addrs[0]; - // Share without preceding preprocess participation → fatal slash + // Share without preceding preprocess participation -> fatal slash // (the accumulate preceding_topic check slashes the signer) { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( 1, Transaction::DkgConfirmationShare { attempt: 0, share: [10u8; 32], - signed: make_signed(key0), + signed: new_signed(key0), }, ); @@ -601,7 +494,7 @@ mod handle_application_tx { } } - /// Verify that the full preprocess→share flow works for DkgConfirmation. + /// Verify that the full preprocess->share flow works for DkgConfirmation. /// /// Previously, this panicked because `accumulate<[u8; 32]>` (share) used typed deserialization /// on the preceding preprocess topic stored as `[u8; 64]`. Fixed by using a raw key-existence @@ -609,41 +502,37 @@ mod handle_application_tx { #[test] fn dkg_confirmation_preprocess_then_share_flow() { let set = default_test_validator_set(); - - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); let mut db = MemDb::new(); let mut txn = db.txn(); - // All 3 validators submit preprocesses (threshold crossed → DkgConfirmationMessages sent) + // All 3 validators submit preprocesses (threshold crossed -> DkgConfirmationMessages sent) { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { scan_block.handle_application_tx( 1, Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess, - signed: make_signed(key), + signed: new_signed(key), }, ); } } assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); - // All 3 validators submit shares (threshold crossed → DkgConfirmationMessages sent) + // All 3 validators submit shares (threshold crossed -> DkgConfirmationMessages sent) { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for (key, share) in [(key0, [10u8; 32]), (key1, [20u8; 32]), (key2, [30u8; 32])] { scan_block.handle_application_tx( 1, - Transaction::DkgConfirmationShare { attempt: 0, share, signed: make_signed(key) }, + Transaction::DkgConfirmationShare { attempt: 0, share, signed: new_signed(key) }, ); } } @@ -651,18 +540,20 @@ mod handle_application_tx { } #[test] - fn handle_cosign_tx_type() { + fn cosign() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let block_hash = random_block_hash(&mut OsRng); - let mut global_session = [0u8; 32]; - OsRng.fill_bytes(&mut global_session); + let global_session = random_bytes_32(&mut OsRng); - let intent = - CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; + let intent = CosignIntent { + global_session, + block_number: random_block_number(&mut OsRng), + block_hash, + notable: false, + }; // Sets LatestSubstrateBlockToCosign and starts cosigning { @@ -674,8 +565,7 @@ mod handle_application_tx { } let mut txn = db.txn(); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx(1, Transaction::Cosign { substrate_block_hash: block_hash }); @@ -697,8 +587,7 @@ mod handle_application_tx { } let mut txn = db.txn(); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block .handle_application_tx(1, Transaction::Cosign { substrate_block_hash: second_hash }); @@ -709,10 +598,9 @@ mod handle_application_tx { } #[test] - fn handle_cosigned_tx_type() { + fn cosigned() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); // Marks block as cosigned @@ -723,8 +611,7 @@ mod handle_application_tx { assert!(!TributaryDb::cosigned(&mut txn, set, block_hash)); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); @@ -746,8 +633,7 @@ mod handle_application_tx { let mut txn = db.txn(); assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); @@ -768,8 +654,7 @@ mod handle_application_tx { } let mut txn = db.txn(); - let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: other_hash }); @@ -780,10 +665,9 @@ mod handle_application_tx { } #[test] - fn handle_substrate_block_tx_type() { + fn substrate_block() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); @@ -797,65 +681,53 @@ mod handle_application_tx { } let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx(1, Transaction::SubstrateBlock { hash: block_hash }); for plan in &plans { - let topic = Topic::Sign { - id: VariantSignId::Transaction(*plan), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let topic = expected_topic_after_start_cosigning(VariantSignId::Transaction(*plan)); assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); } } #[test] - fn handle_batch_tx_type() { + fn batch() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); let batch_hash = [42u8; 32]; let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx(1, Transaction::Batch { hash: batch_hash }); - let topic = Topic::Sign { - id: VariantSignId::Batch(batch_hash), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); } #[test] - fn handle_sign_tx_type() { + fn sign() { let set = default_test_validator_set(); - - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (key0, addr0) = keys_addrs[0]; + let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); let sign_id = VariantSignId::Transaction([42; 32]); - let topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Preprocess }; + let topic = expected_topic_after_start_cosigning(sign_id); - // Wrong data length: signer has weight 1 but submits 2 entries → fatal slash + // Wrong data length: signer has weight 1 but submits 2 entries -> fatal slash { let mut db = MemDb::new(); let mut txn = db.txn(); TributaryDb::recognize_topic(&mut txn, set, topic); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( 1, Transaction::Sign { @@ -863,7 +735,7 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Preprocess, data: vec![vec![1], vec![2]], - signed: make_signed(key0), + signed: new_signed(key0), }, ); @@ -877,7 +749,8 @@ mod handle_application_tx { TributaryDb::recognize_topic(&mut txn, set, topic); { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( 1, @@ -886,7 +759,7 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Preprocess, data: vec![vec![1, 2, 3]], - signed: make_signed(key), + signed: new_signed(key), }, ); } @@ -896,27 +769,95 @@ mod handle_application_tx { } } + /// Exercises the Sign Share -> Participating path (line 559: `Shares { id, shares: data_set }`). + /// Requires first accumulating preprocesses to threshold (which recognizes the Share topic + /// and stores preceding data), then accumulating shares to threshold. #[test] - fn handle_slash_report_tx_type() { + fn sign_share_sends_shares_message() { let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); + let set_info = new_test_set_info(&validator_data); + let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (_, addr1) = random_serai_address_and_key(&mut OsRng); - let (_, addr2) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let sign_id = VariantSignId::Transaction([42; 32]); + let preprocess_topic = expected_topic_after_start_cosigning(sign_id); + let share_topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Share }; + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Recognize the Preprocess topic + TributaryDb::recognize_topic(&mut txn, set, preprocess_topic); + + // Step 1: All validators submit preprocesses, crossing threshold. + // This auto-recognizes the Share topic (succeeding_topic) and stores preprocess data. + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for key in [key0, key1, key2] { + scan_block.handle_application_tx( + 1, + Transaction::Sign { + id: sign_id, + attempt: 0, + round: SigningProtocolRound::Preprocess, + data: vec![vec![1, 2, 3]], + signed: new_signed(key), + }, + ); + } + } + + // Drain the Preprocesses message from step 1 + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + + // Share topic should now be recognized + assert!(AccumulatedWeight::get(&mut txn, set, share_topic).is_some()); + + // Step 2: All validators submit shares, crossing threshold -> sends Shares message. + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for key in [key0, key1, key2] { + scan_block.handle_application_tx( + 2, + Transaction::Sign { + id: sign_id, + attempt: 0, + round: SigningProtocolRound::Share, + data: vec![vec![4, 5, 6]], + signed: new_signed(key), + }, + ); + } + } + + // The Shares message should have been sent + let msg = ProcessorMessages::try_recv(&mut txn, set); + assert!(msg.is_some(), "expected Shares processor message"); + + // No validators should be slashed + for v in &validators { + assert!(!TributaryDb::is_fatally_slashed(&mut txn, set, *v)); + } + } + + #[test] + fn slash_report() { + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let (key0, addr0) = keys_addrs[0]; - // Wrong length: 3 validators but only 2 slash points → fatal slash + // Wrong length: 3 validators but only 2 slash points -> fatal slash { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( 1, - Transaction::SlashReport { slash_points: vec![0, 0], signed: make_signed(key0) }, + Transaction::SlashReport { slash_points: vec![0, 0], signed: new_signed(key0) }, ); assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); @@ -926,11 +867,11 @@ mod handle_application_tx { { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 3, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( 1, - Transaction::SlashReport { slash_points: vec![0, 0, 0], signed: make_signed(key0) }, + Transaction::SlashReport { slash_points: vec![0, 0, 0], signed: new_signed(key0) }, ); assert!(AccumulatedWeight::get(&mut txn, set, Topic::SlashReport).is_some()); @@ -954,11 +895,11 @@ mod handle_application_tx { // Each reporter says: first 3 validators have 0 points, 4th has 100 // required_participation = 4*2/3+1 = 3, so 3 submissions cross the threshold { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 4, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( 1, - Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: make_signed(key) }, + Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: new_signed(key) }, ); } } @@ -967,6 +908,66 @@ mod handle_application_tx { } } + /// Exercises the even-length median branch (`(this_validator.len() / 2) - 1`) in + /// the SlashReport handler by using 5 validators where `required_participation = 4` (even). + #[test] + fn slash_report_even_reporter_count_median() { + let set = default_test_validator_set(); + + // 5 validators of weight 1 -> required_participation = 5*2/3+1 = 4 + let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = + (0 .. 5).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); + let validator_data: Vec<(SeraiAddress, u16)> = + keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // 4 reporters submit different opinions about validator 4 (index 4). + // Reports (for all 5 validator positions): + // reporter 0: [0, 0, 0, 0, 10] + // reporter 1: [0, 0, 0, 0, 20] + // reporter 2: [0, 0, 0, 0, 30] + // reporter 3: [0, 0, 0, 0, 40] + // + // Sorted values for validator 4: [10, 20, 30, 40] (len=4, even) + // Even median index: (4 / 2) - 1 = 1 -> median = 20 + // + // f = (5-1)/3 = 1, amortization baseline = sorted_medians[5-1-1] = sorted_medians[3] = 0 + // amortized: [0, 0, 0, 0, 20]. Non-zero entries: [20] for validator 4. + let slash_reports = vec![ + vec![0u32, 0, 0, 0, 10], + vec![0, 0, 0, 0, 20], + vec![0, 0, 0, 0, 30], + vec![0, 0, 0, 0, 40], + ]; + + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 5, &weights); + for (i, report) in slash_reports.iter().enumerate() { + let (key, _) = keys_addrs[i]; + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + ); + } + } + + // Threshold was crossed with 4 reporters (even) -> even median branch exercised. + // Verify the signing topic was recognized and a message was sent. + let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); + assert!( + AccumulatedWeight::get(&mut txn, set, sign_topic).is_some(), + "SlashReport sign topic should be recognized" + ); + + let msg = ProcessorMessages::try_recv(&mut txn, set); + assert!(msg.is_some(), "expected SignSlashReport processor message"); + } + mod fuzz_slash_report { use super::*; use proptest::prelude::*; @@ -1025,7 +1026,7 @@ mod handle_application_tx { proptest! { #![proptest_config(ProptestConfig::with_cases(200))] - /// Fuzz the SlashReport → Participating path with randomized slash point vectors. + /// Fuzz the SlashReport -> Participating path with randomized slash point vectors. /// /// Uses 4 validators (f=1) so the threshold-crossing path is reachable. /// All 4 submit identical reports so the median equals the input. @@ -1054,13 +1055,13 @@ mod handle_application_tx { let expected = expected_slash_report(4, &reports); { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 4, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); for (key, _) in [(key0, &addr0), (key1, &addr1), (key2, &addr2), (key3, &addr3)] { scan_block.handle_application_tx( 1, Transaction::SlashReport { slash_points: slash_points.clone(), - signed: make_signed(key), + signed: new_signed(key), }, ); } @@ -1068,7 +1069,7 @@ mod handle_application_tx { match expected { Some(result) if !result.is_empty() => { - // Non-empty slash report → message should be sent + // Non-empty slash report -> message should be sent prop_assert!( ProcessorMessages::try_recv(&mut txn, set).is_some(), "expected ProcessorMessage for non-empty slash report {:?}", @@ -1076,7 +1077,7 @@ mod handle_application_tx { ); } _ => { - // Empty or f==0 → no message sent (slash report is empty, nothing to sign) + // Empty or f==0 -> no message sent (slash report is empty, nothing to sign) // The code still sends the message even for empty reports due to the assert // passing with len=0 <= f. Verify it gets sent regardless. // @@ -1090,11 +1091,7 @@ mod handle_application_tx { } // Verify the SlashReport signing topic was recognized - let sign_topic = Topic::Sign { - id: VariantSignId::SlashReport, - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); prop_assert!( AccumulatedWeight::get(&mut txn, set, sign_topic).is_some(), "SlashReport sign topic should be recognized" @@ -1126,14 +1123,14 @@ mod handle_application_tx { let mut txn = db.txn(); { - let mut scan_block = make_scan_block(&mut txn, &set_info, &validators, 7, &weights); + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 7, &weights); for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( 1, Transaction::SlashReport { slash_points: report.clone(), - signed: make_signed(key), + signed: new_signed(key), }, ); } @@ -1150,17 +1147,13 @@ mod handle_application_tx { } } - // Participating path was reached → message and topic recognition + // Participating path was reached -> message and topic recognition prop_assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - let sign_topic = Topic::Sign { - id: VariantSignId::SlashReport, - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); prop_assert!(AccumulatedWeight::get(&mut txn, set, sign_topic).is_some()); } - /// Fuzz the wrong-length path: slash_points.len() != validators.len() → fatal slash + /// Fuzz the wrong-length path: slash_points.len() != validators.len() -> fatal slash #[test] fn fuzz_slash_report_wrong_length( num_validators in 4usize..10, @@ -1186,12 +1179,12 @@ mod handle_application_tx { { let mut scan_block = - make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( 1, Transaction::SlashReport { slash_points: vec![0; wrong_len], - signed: make_signed(signer_key), + signed: new_signed(signer_key), }, ); } @@ -1216,47 +1209,49 @@ mod handle_block { fn processes_application_transactions() { let mut db = MemDb::new(); let set = default_test_validator_set(); - let batch_hash = [42; 32]; - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let batch_hash = random_bytes_32(&mut OsRng); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let block = Block { - header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, transactions: vec![TributaryTransaction::Application(Transaction::Batch { hash: batch_hash, })], }; - let mut txn = db.txn(); - let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + { + let mut txn = db.txn(); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(1, block); - txn.commit(); + scan_block.handle_block(1, block); + txn.commit(); + } - let topic = Topic::Sign { - id: VariantSignId::Batch(batch_hash), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; - assert!(TributaryDb::recognized(&db, set, topic)); + let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); + assert!(TributaryDb::recognized(&db, set, expected_topic)); } #[test] fn empty_block_only_calls_start_of_block() { let mut db = MemDb::new(); let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let block = Block { - header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, transactions: vec![], }; let mut txn = db.txn(); - let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_block(1, block); txn.commit(); @@ -1272,12 +1267,14 @@ mod handle_block { let set = default_test_validator_set(); let batch_hash_a = [10; 32]; let batch_hash_b = [20; 32]; - let (validator_data, validators, weights, total_weight) = - get_test_validators_and_weights_setup(); + let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); let set_info = new_test_set_info(&validator_data); let block = Block { - header: BlockHeader { parent: [0; 32], transactions: [0; 32] }, + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, transactions: vec![ TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_a }), TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_b }), @@ -1285,18 +1282,162 @@ mod handle_block { }; let mut txn = db.txn(); - let scan_block = make_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_block(1, block); txn.commit(); for hash in [batch_hash_a, batch_hash_b] { - let topic = Topic::Sign { - id: VariantSignId::Batch(hash), - attempt: 0, - round: SigningProtocolRound::Preprocess, - }; + let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(hash)); assert!(TributaryDb::recognized(&db, set, topic)); } } + + /// Construct a borsh-encoded `SignedMessage` for `TendermintNetwork`. + /// + /// The network's types are: ValidatorId = [u8; 32], Block = TendermintBlock, Signature = [u8; 64]. + /// We manually build the borsh encoding rather than depending on the internal tendermint types. + fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { + let mut bytes = Vec::new(); + // Message fields: + bytes.extend_from_slice(&sender); // sender: [u8; 32] + bytes.extend_from_slice(&0u64.to_le_bytes()); // block: BlockNumber(0) + bytes.extend_from_slice(&0u32.to_le_bytes()); // round: RoundNumber(0) + bytes.push(1); // Data::Prevote variant index + bytes.push(0); // Option::None (no block id) + // Signature: + bytes.extend_from_slice(&[0u8; 64]); // sig: [u8; 64] + bytes + } + + #[test] + fn slash_evidence_invalid_precommit() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let (_, addr0) = random_serai_address_and_key(&mut OsRng); + let validator_data = vec![(addr0, 1u16)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let evidence_bytes = make_signed_message_bytes(addr0.0); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( + Evidence::InvalidPrecommit(evidence_bytes), + ))], + }; + + let mut txn = db.txn(); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); + scan_block.handle_block(1, block); + txn.commit(); + + assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); + } + + #[test] + fn slash_evidence_invalid_valid_round() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let (_, addr0) = random_serai_address_and_key(&mut OsRng); + let validator_data = vec![(addr0, 1u16)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let evidence_bytes = make_signed_message_bytes(addr0.0); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( + Evidence::InvalidValidRound(evidence_bytes), + ))], + }; + + let mut txn = db.txn(); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); + scan_block.handle_block(1, block); + txn.commit(); + + assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); + } + + #[test] + fn slash_evidence_conflicting_messages() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let (_, addr0) = random_serai_address_and_key(&mut OsRng); + let validator_data = vec![(addr0, 1u16)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + // Both messages have the same sender; the slash uses the first message's sender + let first = make_signed_message_bytes(addr0.0); + let second = make_signed_message_bytes(addr0.0); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( + Evidence::ConflictingMessages(first, second), + ))], + }; + + let mut txn = db.txn(); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); + scan_block.handle_block(1, block); + txn.commit(); + + assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); + } + + /// Verifies handle_block processes both Tendermint and Application transactions in one block. + #[test] + fn mixed_tendermint_and_application_txs() { + let mut db = MemDb::new(); + let set = default_test_validator_set(); + let (_, addr0) = random_serai_address_and_key(&mut OsRng); + let (_, addr1) = random_serai_address_and_key(&mut OsRng); + let validator_data = vec![(addr0, 1u16), (addr1, 1)]; + let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); + let weights: HashMap = validator_data.iter().copied().collect(); + let set_info = new_test_set_info(&validator_data); + + let batch_hash = [99; 32]; + let evidence_bytes = make_signed_message_bytes(addr0.0); + + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![ + // Tendermint SlashEvidence first + TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit( + evidence_bytes, + ))), + // Then an Application transaction + TributaryTransaction::Application(Transaction::Batch { hash: batch_hash }), + ], + }; + + let mut txn = db.txn(); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, 2, &weights); + scan_block.handle_block(1, block); + txn.commit(); + + // Tendermint evidence slashed addr0 + assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); + // Application tx was still processed + let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); + assert!(TributaryDb::recognized(&db, set, topic)); + } } diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index fd959f570..76d19b706 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -1,53 +1,76 @@ use core::ops::Deref as _; +use std::io::{self, Cursor, Read, Write}; -use rand::{CryptoRng, RngCore, rngs::OsRng}; +use blake2::{digest::typenum::U32, Digest as _, Blake2b}; +use borsh::{BorshDeserialize, BorshSerialize}; +use rand::{CryptoRng, Rng, RngCore, rngs::OsRng}; -use ciphersuite::{group::Group as _, *}; -use dalek_ff_group::Ristretto; - -use serai_primitives::validator_sets::KeyShares; -use serai_primitives::test_helpers::{ - random_bytes_32, random_bytes_64, random_vec_u8, random_serai_address, random_block_hash, - random_genesis, +use ciphersuite::{ + group::{Group as _, GroupEncoding, ff::PrimeField}, + *, }; +use dalek_ff_group::Ristretto; use messages::sign::VariantSignId; - +use serai_primitives::{test_helpers::*, validator_sets::KeyShares}; use tributary_sdk::{ ReadWrite, transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, }; -use crate::{db::Topic, tests::random_key}; -use crate::transaction::{SigningProtocolRound, Signed, Transaction}; - -/// Create a random serai-coordinator-tributary `Signed` from a tributary-sdk `Signed` -fn random_signed(rng: &mut R) -> Signed { - let signed = tributary_sdk::tests::random_signed(&mut *rng); - Signed { signer: signed.signer, signature: signed.signature } -} +use crate::{ + db::Topic, + tests::{random_key, random_signed}, + transaction::{Signed, SigningProtocolRound, Transaction}, +}; -/// One of each signed transaction kind with random values. -fn all_signed_transactions() -> Vec { +/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u32::MAX. +fn all_signed_transactions_and_attempts() -> Vec { + let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); vec![ + // RemoveParticipant Transaction::RemoveParticipant { participant: random_serai_address(&mut OsRng), signed: random_signed(&mut OsRng), }, + // DkgParticipation Transaction::DkgParticipation { participation: random_vec_u8(&mut OsRng), signed: random_signed(&mut OsRng), }, + // DkgConfirmationPreprocess Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess: random_bytes_64(&mut OsRng), signed: random_signed(&mut OsRng), }, + Transaction::DkgConfirmationPreprocess { + attempt: random_attempt, + preprocess: random_bytes_64(&mut OsRng), + signed: random_signed(&mut OsRng), + }, + Transaction::DkgConfirmationPreprocess { + attempt: u32::MAX, + preprocess: random_bytes_64(&mut OsRng), + signed: random_signed(&mut OsRng), + }, + // DkgConfirmationShare Transaction::DkgConfirmationShare { attempt: 0, share: random_bytes_32(&mut OsRng), signed: random_signed(&mut OsRng), }, + Transaction::DkgConfirmationShare { + attempt: random_attempt, + share: random_bytes_32(&mut OsRng), + signed: random_signed(&mut OsRng), + }, + Transaction::DkgConfirmationShare { + attempt: u32::MAX, + share: random_bytes_32(&mut OsRng), + signed: random_signed(&mut OsRng), + }, + // Sign Preprocess Transaction::Sign { id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), attempt: 0, @@ -55,6 +78,21 @@ fn all_signed_transactions() -> Vec { data: vec![random_vec_u8(&mut OsRng)], signed: random_signed(&mut OsRng), }, + Transaction::Sign { + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), + attempt: random_attempt, + round: SigningProtocolRound::Preprocess, + data: vec![random_vec_u8(&mut OsRng)], + signed: random_signed(&mut OsRng), + }, + Transaction::Sign { + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), + attempt: u32::MAX, + round: SigningProtocolRound::Preprocess, + data: vec![random_vec_u8(&mut OsRng)], + signed: random_signed(&mut OsRng), + }, + // Sign Share Transaction::Sign { id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), attempt: 0, @@ -62,6 +100,21 @@ fn all_signed_transactions() -> Vec { data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], signed: random_signed(&mut OsRng), }, + Transaction::Sign { + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), + attempt: random_attempt, + round: SigningProtocolRound::Share, + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed: random_signed(&mut OsRng), + }, + Transaction::Sign { + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), + attempt: u32::MAX, + round: SigningProtocolRound::Share, + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed: random_signed(&mut OsRng), + }, + // SlashReport Transaction::SlashReport { slash_points: (0 .. 3).map(|_| OsRng.next_u32()).collect(), signed: random_signed(&mut OsRng), @@ -81,7 +134,7 @@ fn all_provided_transactions() -> Vec { /// One of each of all transaction kinds. fn all_transactions() -> Vec { - let mut txs = all_signed_transactions(); + let mut txs = all_signed_transactions_and_attempts(); txs.extend(all_provided_transactions()); txs } @@ -103,13 +156,9 @@ fn signing_protocol_round_nonce() { mod signed { use super::*; - use ciphersuite::group::{GroupEncoding, ff::PrimeField}; #[test] fn borsh_serialize_and_deserialize() { - use std::io::{self, Read, Write}; - use borsh::{BorshSerialize, BorshDeserialize}; - // Check the format of `Signed` { let signed = random_signed(&mut OsRng); @@ -139,7 +188,7 @@ mod signed { ); let deserialized: Signed = borsh::from_slice(&serialized).unwrap(); - let mut cursor = std::io::Cursor::new(&serialized); + let mut cursor = Cursor::new(&serialized); assert_eq!( deserialized, Signed::deserialize_reader(&mut cursor).unwrap(), @@ -192,7 +241,7 @@ mod signed { { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let truncated = &serialized[.. 5]; - let mut cursor = std::io::Cursor::new(truncated); + let mut cursor = Cursor::new(truncated); let result = Signed::deserialize_reader(&mut cursor); assert!(result.is_err(), "truncated data should fail to deserialize"); } @@ -201,7 +250,7 @@ mod signed { { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let signer_only = &serialized[.. 32]; - let mut cursor = std::io::Cursor::new(signer_only); + let mut cursor = Cursor::new(signer_only); let result = Signed::deserialize_reader(&mut cursor); assert!(result.is_err(), "signer-only data without signature should fail to deserialize"); } @@ -231,119 +280,175 @@ mod signed { mod transaction { use super::*; - #[test] - fn readwrite() { - for mut tx in all_transactions() { - let serialized = ReadWrite::serialize(&tx); + mod readwrite { + use super::*; - let expected = match &tx { - Transaction::RemoveParticipant { participant, signed } => { - let mut expected = vec![0u8]; - expected.extend(&participant.0); - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - Transaction::DkgParticipation { participation, signed } => { - let mut expected = vec![1u8]; - expected.extend(&(participation.len() as u32).to_le_bytes()); - expected.extend(participation); - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => { - let mut expected = vec![2u8]; - expected.extend(&attempt.to_le_bytes()); - expected.extend(preprocess); - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - Transaction::DkgConfirmationShare { attempt, share, signed } => { - let mut expected = vec![3u8]; - expected.extend(&attempt.to_le_bytes()); - expected.extend(share); - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - Transaction::Cosign { substrate_block_hash } => { - let mut expected = vec![4u8]; - expected.extend(&substrate_block_hash.0); - expected - } - Transaction::Cosigned { substrate_block_hash } => { - let mut expected = vec![5u8]; - expected.extend(&substrate_block_hash.0); - expected - } - Transaction::SubstrateBlock { hash } => { - let mut expected = vec![6u8]; - expected.extend(&hash.0); - expected - } - Transaction::Batch { hash } => { - let mut expected = vec![7u8]; - expected.extend(hash); - expected - } - Transaction::Sign { id, attempt, round, data, signed } => { - let mut expected = vec![8u8]; - // Independently encode VariantSignId - match id { - VariantSignId::Cosign(v) => { - expected.push(0u8); - expected.extend(&v.to_le_bytes()); - } - VariantSignId::Batch(h) => { - expected.push(1u8); - expected.extend(h); + #[test] + fn serialize_and_deserialize() { + for mut tx in all_transactions() { + let serialized = ReadWrite::serialize(&tx); + + let expected = match &tx { + Transaction::RemoveParticipant { participant, signed } => { + let mut expected = vec![0u8]; + expected.extend(&participant.0); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgParticipation { participation, signed } => { + let mut expected = vec![1u8]; + expected.extend(&(participation.len() as u32).to_le_bytes()); + expected.extend(participation); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => { + let mut expected = vec![2u8]; + expected.extend(&attempt.to_le_bytes()); + expected.extend(preprocess); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::DkgConfirmationShare { attempt, share, signed } => { + let mut expected = vec![3u8]; + expected.extend(&attempt.to_le_bytes()); + expected.extend(share); + expected.extend(borsh::to_vec(signed).unwrap()); + expected + } + Transaction::Cosign { substrate_block_hash } => { + let mut expected = vec![4u8]; + expected.extend(&substrate_block_hash.0); + expected + } + Transaction::Cosigned { substrate_block_hash } => { + let mut expected = vec![5u8]; + expected.extend(&substrate_block_hash.0); + expected + } + Transaction::SubstrateBlock { hash } => { + let mut expected = vec![6u8]; + expected.extend(&hash.0); + expected + } + Transaction::Batch { hash } => { + let mut expected = vec![7u8]; + expected.extend(hash); + expected + } + Transaction::Sign { id, attempt, round, data, signed } => { + let mut expected = vec![8u8]; + // Independently encode VariantSignId + match id { + VariantSignId::Cosign(v) => { + expected.push(0u8); + expected.extend(&v.to_le_bytes()); + } + VariantSignId::Batch(h) => { + expected.push(1u8); + expected.extend(h); + } + VariantSignId::SlashReport => { + expected.push(2u8); + } + VariantSignId::Transaction(h) => { + expected.push(3u8); + expected.extend(h); + } } - VariantSignId::SlashReport => { - expected.push(2u8); + expected.extend(&attempt.to_le_bytes()); + match round { + SigningProtocolRound::Preprocess => expected.push(0u8), + SigningProtocolRound::Share => expected.push(1u8), } - VariantSignId::Transaction(h) => { - expected.push(3u8); - expected.extend(h); + // Use the RoundPayloads type of Vec to fit for both rounds + expected.extend(&(data.len() as u32).to_le_bytes()); + for d in data { + expected.extend(&(d.len() as u32).to_le_bytes()); + expected.extend(d); } + expected.extend(borsh::to_vec(signed).unwrap()); + expected } - expected.extend(&attempt.to_le_bytes()); - match round { - SigningProtocolRound::Preprocess => expected.push(0u8), - SigningProtocolRound::Share => expected.push(1u8), - } - // Vec> - expected.extend(&(data.len() as u32).to_le_bytes()); - for d in data { - expected.extend(&(d.len() as u32).to_le_bytes()); - expected.extend(d); - } - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - Transaction::SlashReport { slash_points, signed } => { - let mut expected = vec![9u8]; - expected.extend(&(slash_points.len() as u32).to_le_bytes()); - for &p in slash_points { - expected.extend(&p.to_le_bytes()); + Transaction::SlashReport { slash_points, signed } => { + let mut expected = vec![9u8]; + expected.extend(&(slash_points.len() as u32).to_le_bytes()); + for &p in slash_points { + expected.extend(&p.to_le_bytes()); + } + expected.extend(borsh::to_vec(signed).unwrap()); + expected } - expected.extend(borsh::to_vec(signed).unwrap()); - expected - } - }; + }; - assert_eq!(serialized, expected, "format mismatch for {tx:?}"); + assert_eq!(serialized, expected, "format mismatch for {tx:?}"); - let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); - assert_eq!(tx, deserialized); + let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); + assert_eq!(tx, deserialized); - match tx.kind() { - TransactionKind::Signed(_, _) => { + if let TransactionKind::Signed(_, _) = tx.kind() { tx.sign(&mut OsRng, random_genesis(&mut OsRng), &random_key(&mut OsRng)); let serialized = ReadWrite::serialize(&tx); let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); assert_eq!(tx, deserialized, "ReadWrite failed after signing for {tx:?}"); } - _ => {} } } + + /// Regression test: `Transaction::read` must use `deserialize_reader`, not `borsh::from_reader`. + /// + /// `borsh::from_reader` asserts the reader is exhausted after deserialization. When multiple + /// transactions are serialized into a single stream (as happens in `Block::read`), the first + /// `from_reader` call would fail because subsequent transactions' bytes remain in the reader. + #[test] + fn sequential_reads_from_shared_reader() { + let txs = all_transactions(); + + let mut buf = Vec::new(); + buf.extend(&u32::try_from(txs.len()).unwrap().to_le_bytes()); + for tx in &txs { + tx.write(&mut buf).unwrap(); + } + + let mut cursor = Cursor::new(&buf); + let mut count = [0u8; 4]; + cursor.read_exact(&mut count).unwrap(); + + for (i, expected) in txs.iter().enumerate() { + let actual = Transaction::read(&mut cursor) + .unwrap_or_else(|e| panic!("failed to read transaction {i} from shared reader: {e}")); + assert_eq!(&actual, expected, "transaction {i} mismatch after sequential read"); + } + + let mut leftover = [0u8; 1]; + assert_eq!( + cursor.read(&mut leftover).unwrap(), + 0, + "reader should be exhausted after reading all transactions" + ); + } + + /// Counterpart to `sequential_reads_from_shared_reader`: proves `borsh::from_reader` rejects + /// a reader that has leftover bytes, which is why `Transaction::read` must use + /// `deserialize_reader` instead. + #[test] + fn borsh_from_reader_rejects_shared_reader_with_trailing_bytes() { + let txs = all_transactions(); + + let mut buf = Vec::new(); + buf.extend(&u32::try_from(txs.len()).unwrap().to_le_bytes()); + for tx in &txs { + tx.write(&mut buf).unwrap(); + } + + let mut cursor = Cursor::new(&buf); + let mut count = [0u8; 4]; + cursor.read_exact(&mut count).unwrap(); + + // borsh::from_reader should fail because subsequent tx bytes remain after the first + let result: io::Result = borsh::from_reader(&mut cursor); + assert!(result.is_err(), "borsh::from_reader should reject a reader with trailing bytes"); + } } mod kind { @@ -362,7 +467,7 @@ mod transaction { out } - for mut tx in all_signed_transactions() { + for mut tx in all_signed_transactions_and_attempts() { tx.sign(&mut OsRng, genesis, &key); let (expected_order, expected_nonce) = match &tx { @@ -372,12 +477,7 @@ mod transaction { (order, 0) } Transaction::DkgParticipation { .. } => (borsh_label(b"DkgParticipation"), 0), - Transaction::DkgConfirmationPreprocess { attempt, .. } => { - let mut order = borsh_label(b"DkgConfirmation"); - order.extend(&attempt.to_le_bytes()); - (order, 1) - } - // NOTE: same order AND nonce as DkgConfirmationPreprocess + Transaction::DkgConfirmationPreprocess { attempt, .. } | Transaction::DkgConfirmationShare { attempt, .. } => { let mut order = borsh_label(b"DkgConfirmation"); order.extend(&attempt.to_le_bytes()); @@ -411,7 +511,7 @@ mod transaction { (order, nonce) } Transaction::SlashReport { .. } => (borsh_label(b"SlashReport"), 0), - other => panic!("all_signed_transactions returned non-signed tx: {other:?}"), + other => panic!("all_signed_transactions_and_attempts returned non-signed tx: {other:?}"), }; match tx.kind() { @@ -454,8 +554,6 @@ mod transaction { #[test] fn hash_format_and_determinism() { - use blake2::{digest::typenum::U32, Digest as _, Blake2b}; - let key = random_key(&mut OsRng); let genesis = random_genesis(&mut OsRng); @@ -497,7 +595,11 @@ mod transaction { let mut tx2 = tx.clone(); tx1.sign(&mut OsRng, genesis, &key); tx2.sign(&mut OsRng, genesis, &key); - assert_eq!(tx1.hash(), tx2.hash(), "Hashes should be equal despite different signatures"); + assert_eq!( + tx1.hash(), + tx2.hash(), + "Hashes should be equal despite different nonces and signatures" + ); } } } @@ -587,14 +689,11 @@ mod transaction { Transaction::DkgConfirmationShare { attempt, .. } => { Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share }) } - Transaction::Cosign { .. } | - Transaction::Cosigned { .. } | - Transaction::SubstrateBlock { .. } | - Transaction::Batch { .. } => None, Transaction::Sign { id, attempt, round, .. } => { Some(Topic::Sign { id: *id, attempt: *attempt, round: *round }) } Transaction::SlashReport { .. } => Some(Topic::SlashReport), + _ => None, }; assert_eq!(tx.topic(), expected, "Wrong topic for {tx:?}"); } @@ -610,7 +709,7 @@ mod transaction { let genesis = random_genesis(&mut OsRng); // Sets correct signer and produces verifiable signature - for mut tx in all_signed_transactions() { + for mut tx in all_signed_transactions_and_attempts() { tx.sign(&mut OsRng, genesis, &key); let sig_hash = tx.sig_hash(genesis); @@ -633,13 +732,14 @@ mod transaction { tx.sign(&mut OsRng, genesis, &key); let mut wrong_genesis = random_genesis(&mut OsRng); + // guaranteed to be the wrong genesis if wrong_genesis == genesis { wrong_genesis[0] ^= 1; } let wrong_challenge = tx.sig_hash(wrong_genesis); - if let TransactionKind::Signed(_, trib_signed) = tx.kind() { + if let TransactionKind::Signed(_, tributary_signed) = tx.kind() { assert_eq!( - trib_signed.signature.verify(trib_signed.signer, wrong_challenge), + tributary_signed.signature.verify(tributary_signed.signer, wrong_challenge), false, "Signature should not verify with wrong genesis" ); diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index b05b23537..5065f235c 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -94,12 +94,17 @@ impl Default for Signed { } } -/// The type used for preprocesses in the signing protocol. +/// The type used for preprocess payloads in the signing protocol. pub type Preprocess = [u8; 64]; -/// The type used for shares in the signing protocol. +/// The type used for share payloads in the signing protocol. pub type Share = [u8; 32]; -/// The type used for either shares or preprocesses in the signing protocol. -pub type GenericDataset = Vec>; +/// A generic, less constrained type used for either share or preprocess payloads +/// in the signing protocol. +pub type GenericSignPayload = Vec; +/// One serialized payload per key share held by the sending validator. +/// The outer Vec has one entry per key share; each inner Vec is a +/// serialized preprocess (64 bytes) or share (32 bytes), depending on `round`. +pub type RoundPayloads = Vec; /// The Tributary transaction definition used by Serai #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] @@ -218,8 +223,9 @@ pub enum Transaction { /// The data itself /// /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending - /// this transaction has. - data: Vec>, + /// this transaction has, and each blob is a serialized preprocess (64 bytes) or share + /// (32 bytes), uniform across all entries as determined by `round`. + data: RoundPayloads, /// The transaction's signer and signature signed: Signed, }, @@ -235,7 +241,7 @@ pub enum Transaction { impl ReadWrite for Transaction { fn read(reader: &mut R) -> io::Result { - borsh::from_reader(reader) + borsh::BorshDeserialize::deserialize_reader(reader) } fn write(&self, writer: &mut W) -> io::Result<()> { From 55aab58554398ce0ea5167a56c1274b12def5855 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 7 Apr 2026 16:09:54 -0400 Subject: [PATCH 47/71] feat(coordinator/tributary): finish the coverage adding scanning block and scanning tributary tests --- coordinator/tributary/Cargo.toml | 2 +- coordinator/tributary/src/lib.rs | 5 +- coordinator/tributary/src/tests/db.rs | 16 +- coordinator/tributary/src/tests/mod.rs | 213 ++- coordinator/tributary/src/tests/scan_block.rs | 1385 ++++++++--------- .../tributary/src/tests/scan_tributary.rs | 229 +++ .../tributary/src/tests/transaction.rs | 127 +- coordinator/tributary/src/tests/tributary.rs | 78 + 8 files changed, 1176 insertions(+), 879 deletions(-) create mode 100644 coordinator/tributary/src/tests/scan_tributary.rs create mode 100644 coordinator/tributary/src/tests/tributary.rs diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index 1c7e8f0ee..805e61381 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -46,7 +46,7 @@ serai-env = { path = "../../common/env", version = "0.1.0" } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } rand = { version = "0.8", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } -proptest = "1" +tendermint = { package = "tendermint-machine", path = "../tributary-sdk/tendermint" } tributary-sdk = { path = "../tributary-sdk", features = ["tests"] } tokio = { version = "1", default-features = false, features = ["rt", "time", "macros", "rt-multi-thread"] } serai-task = { path = "../../common/task", features = ["test-helpers"] } diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 0279287ec..23b44c375 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -655,10 +655,9 @@ impl ContinuallyRan for ScanTributaryTask { .unwrap_or((0, self.tributary.genesis())); let mut made_progress = false; - while let Some(next) = self.tributary.block_after(&last_block_hash) { - let block = self.tributary.block(&next).unwrap(); + while let Some(block_hash) = self.tributary.block_after(&last_block_hash) { + let block = self.tributary.block(&block_hash).unwrap(); let block_number = last_block_number + 1; - let block_hash = block.hash(); // Make sure we have all of the provided transactions for this block for tx in &block.transactions { diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 13c234ba1..3bb1d94fd 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -12,7 +12,7 @@ use serai_primitives::{ }; use crate::{ - db::*, + db::{*, ProcessorMessages, DkgConfirmationMessages}, tests::*, transaction::{RoundPayloads, Preprocess, Share, SigningProtocolRound}, }; @@ -326,7 +326,8 @@ mod tributary_db { let block_hash1 = random_block_hash(&mut OsRng); let block_number1 = random_block_number(&mut OsRng); - let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number1)); + let expected_topic = + expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number1)); // Recognizes topic { @@ -383,7 +384,7 @@ mod tributary_db { assert!(TributaryDb::recognized( &txn, set, - expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number2)) + expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number2)) )); // Previous topic also remains recognized assert!(TributaryDb::recognized(&txn, set, expected_topic)); @@ -408,7 +409,7 @@ mod tributary_db { all_topics_and_attempts().len() ); - for iteration in 0 .. 100 { + for _iteration in 0 .. 100 { for topic in all_topics_and_attempts() { // Fresh DB per topic so recognized state doesn't leak between iterations let mut db = MemDb::new(); @@ -420,7 +421,7 @@ mod tributary_db { reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); serai_env::trace!( - "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ + "iteration={_iteration}, topic={topic:?}, block_number={block_number}, \ reattempts={reattempts:?}" ); @@ -452,8 +453,7 @@ mod tributary_db { } // No extra messages should remain in either queue - assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + assert_no_pending_messages(&mut txn, set); txn.commit(); } @@ -485,7 +485,7 @@ mod tributary_db { fn default_accumulate_setup( ) -> (ExternalValidatorSet, SeraiAddress, Vec, u16, u16) { let set = random_validator_set(&mut OsRng); - let (_, validators, _, total_weight) = setup_test_validators_and_weights(); + let (_, _, validators, _, total_weight) = setup_test_validators_and_weights_with_keys(); let validator = validators[0]; let validator_weight = 1; (set, validator, validators, total_weight, validator_weight) diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 93344ab3c..6ad3cfd55 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -11,20 +11,21 @@ use rand_core::OsRng; use serai_primitives::{ address::SeraiAddress, test_helpers::{ - random_bytes_32, random_bytes_64, random_serai_address, random_vec_u8, + random_block_hash, random_bytes_32, random_bytes_64, random_serai_address, random_vec_u8, default_test_validator_set, }, }; -use tributary_sdk::P2p; +use tributary_sdk::{P2p, tendermint::TendermintBlock}; +use tendermint::{ + SignedMessage, Message, Data, + ext::{BlockNumber, RoundNumber}, +}; use zeroize::Zeroizing; use dkg::Participant; use serai_coordinator_substrate::NewSetInformation; -use crate::{ - db::{ActivelyCosigning, TributaryDb}, - transaction::{Signed, SigningProtocolRound, Transaction}, -}; +use crate::*; pub mod transaction; pub mod db; @@ -57,24 +58,48 @@ pub(crate) fn random_serai_address_and_key( (key, SeraiAddress(key.to_bytes())) } -use crate::db::Topic; - pub(crate) fn random_signed(rng: &mut R) -> Signed { let signed = tributary_sdk::tests::random_signed(&mut *rng); Signed { signer: signed.signer, signature: signed.signature } } -/// One of each signed transaction kind, using the provided `Signed` value. -pub(crate) fn all_signed_transactions_with(signed: Signed) -> Vec { +/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u32::MAX. +pub(crate) fn all_signed_transactions_and_attempts(signed: Signed) -> Vec { + let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); vec![ + // RemoveParticipant Transaction::RemoveParticipant { participant: random_serai_address(&mut OsRng), signed }, + // DkgParticipation Transaction::DkgParticipation { participation: random_vec_u8(&mut OsRng), signed }, + // DkgConfirmationPreprocess Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess: random_bytes_64(&mut OsRng), signed, }, + Transaction::DkgConfirmationPreprocess { + attempt: random_attempt, + preprocess: random_bytes_64(&mut OsRng), + signed, + }, + Transaction::DkgConfirmationPreprocess { + attempt: u32::MAX, + preprocess: random_bytes_64(&mut OsRng), + signed, + }, + // DkgConfirmationShare Transaction::DkgConfirmationShare { attempt: 0, share: random_bytes_32(&mut OsRng), signed }, + Transaction::DkgConfirmationShare { + attempt: random_attempt, + share: random_bytes_32(&mut OsRng), + signed, + }, + Transaction::DkgConfirmationShare { + attempt: u32::MAX, + share: random_bytes_32(&mut OsRng), + signed, + }, + // Sign Preprocess Transaction::Sign { id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), attempt: 0, @@ -82,16 +107,85 @@ pub(crate) fn all_signed_transactions_with(signed: Signed) -> Vec { data: vec![random_vec_u8(&mut OsRng)], signed, }, + Transaction::Sign { + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), + attempt: random_attempt, + round: SigningProtocolRound::Preprocess, + data: vec![random_vec_u8(&mut OsRng)], + signed, + }, + Transaction::Sign { + id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), + attempt: u32::MAX, + round: SigningProtocolRound::Preprocess, + data: vec![random_vec_u8(&mut OsRng)], + signed, + }, + // Sign Share + Transaction::Sign { + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), + attempt: 0, + round: SigningProtocolRound::Share, + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed, + }, + Transaction::Sign { + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), + attempt: random_attempt, + round: SigningProtocolRound::Share, + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed, + }, + Transaction::Sign { + id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), + attempt: u32::MAX, + round: SigningProtocolRound::Share, + data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], + signed, + }, + // SlashReport Transaction::SlashReport { slash_points: (0 .. 3).map(|_| OsRng.next_u32()).collect(), signed }, ] } +/// One of each provided transaction kind. +pub(crate) fn all_provided_transactions() -> Vec { + vec![ + Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }, + Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }, + Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }, + Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }, + ] +} + +/// One of each of all transaction kinds. +pub(crate) fn all_transactions() -> Vec { + let mut txs = all_signed_transactions_and_attempts(random_signed(&mut OsRng)); + txs.extend(all_provided_transactions()); + txs +} + +/// Assert that no messages remain in either the processor or DKG confirmation queues. +pub(crate) fn assert_no_pending_messages( + txn: &mut impl serai_db::DbTxn, + set: serai_primitives::validator_sets::ExternalValidatorSet, +) { + assert!( + crate::ProcessorMessages::try_recv(txn, set).is_none(), + "unexpected remaining ProcessorMessage", + ); + assert!( + crate::DkgConfirmationMessages::try_recv(txn, set).is_none(), + "unexpected remaining DkgConfirmationMessage", + ); +} + pub(crate) fn random_transaction_id() -> VariantSignId { VariantSignId::Transaction(random_bytes_32(&mut OsRng)) } /// The expected topic to be recognized after start_cosigning runs. -pub(crate) fn expected_topic_after_start_cosigning(id: VariantSignId) -> Topic { +pub(crate) fn expected_initially_recognized_sign_topic(id: VariantSignId) -> Topic { Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess } } @@ -105,7 +199,8 @@ pub(crate) fn assert_cosigning_invariants( block_hash: serai_primitives::BlockHash, block_number: u64, ) { - let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Cosign(block_number)); + let expected_topic = + expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number)); assert_eq!( ActivelyCosigning::get(txn, set), @@ -113,16 +208,65 @@ pub(crate) fn assert_cosigning_invariants( "ActivelyCosigning should be set to the block hash after start_cosigning" ); assert!( - TributaryDb::recognized(txn, set, expected_topic), + RecognizedTopics::recognized(txn, set, expected_topic), "cosign topic should be recognized after start_cosigning" ); assert_eq!( - TributaryDb::try_recv_topic_requiring_recognition(txn, set), + RecognizedTopics::try_recv_topic_requiring_recognition(txn, set), Some(expected_topic), "cosign topic should be queued for recognition after start_cosigning" ); } +/// Construct a borsh-encoded `SignedMessage` for `TendermintNetwork`. +pub(crate) fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { + let msg = Message::<[u8; 32], TendermintBlock, [u8; 64]> { + sender, + block: BlockNumber(0), + round: RoundNumber(0), + data: Data::Prevote(None), + }; + borsh::to_vec(&SignedMessage { msg, sig: [0u8; 64] }).unwrap() +} + +/// Drain expected messages produced by the given transactions, then assert both queues are empty. +/// +/// Some transactions produce messages on first submission (e.g. DkgParticipation, Cosign). +/// This function drains those expected messages before calling `assert_no_pending_messages`. +pub(crate) fn assert_block_side_effects( + txn: &mut impl serai_db::DbTxn, + set: serai_primitives::validator_sets::ExternalValidatorSet, + transactions: &[tributary_sdk::Transaction], +) { + for tx in transactions { + match tx { + tributary_sdk::Transaction::Application(app_tx) => match app_tx { + Transaction::DkgParticipation { .. } => { + assert!( + crate::ProcessorMessages::try_recv(txn, set).is_some(), + "DkgParticipation should produce a processor message", + ); + } + Transaction::Cosign { .. } => { + assert!( + crate::ProcessorMessages::try_recv(txn, set).is_some(), + "Cosign should produce a processor message", + ); + } + Transaction::SlashReport { .. } => { + assert!( + RecognizedTopics::recognized(txn, set, Topic::SlashReport), + "SlashReport topic should be recognized", + ); + } + _ => {} + }, + tributary_sdk::Transaction::Tendermint(_) => {} + } + } + assert_no_pending_messages(txn, set); +} + pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { let mut participant_indexes = HashMap::new(); let mut reverse_lookup = HashMap::new(); @@ -150,27 +294,10 @@ pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInf } } -/// Common test setup: 3 random validators each with weight 1, total_weight = 3. -pub(crate) fn setup_test_validators_and_weights( -) -> (Vec<(SeraiAddress, u16)>, Vec, HashMap, u16) { - let validator_data = vec![ - (random_serai_address(&mut OsRng), 1u16), - (random_serai_address(&mut OsRng), 1), - (random_serai_address(&mut OsRng), 1), - ]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - - let mut weights = HashMap::new(); - for (address, weight) in &validator_data { - weights.insert(*address, *weight); - } - - (validator_data, validators, weights, 3) -} - -/// Like `setup_test_validators_and_weights`, but each validator also has a real key -/// so tests can produce valid `Signed` values via `new_signed`. -pub(crate) fn setup_test_validators_and_weights_with_keys() -> ( +/// Generate `n` random validators (weight 1 each) with keys, returning all derived collections. +pub(crate) fn setup_n_validators_with_keys( + n: usize, +) -> ( Vec<(RistrettoPoint, SeraiAddress)>, Vec<(SeraiAddress, u16)>, Vec, @@ -178,11 +305,23 @@ pub(crate) fn setup_test_validators_and_weights_with_keys() -> ( u16, ) { let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. 3).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); + (0 .. n).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); let validator_data: Vec<(SeraiAddress, u16)> = keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); + let total_weight = n as u16; - (keys_addrs, validator_data, validators, weights, 3) + (keys_addrs, validator_data, validators, weights, total_weight) +} + +/// Common test setup with 3 random validators each with weight 1, total_weight = 3. +pub(crate) fn setup_test_validators_and_weights_with_keys() -> ( + Vec<(RistrettoPoint, SeraiAddress)>, + Vec<(SeraiAddress, u16)>, + Vec, + HashMap, + u16, +) { + setup_n_validators_with_keys(3) } diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index 3fc9d2b51..cc399285d 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -1,44 +1,21 @@ use core::marker::PhantomData; -use std::collections::HashMap; -use rand::RngCore; -use rand_core::OsRng; -use serai_primitives::test_helpers::{ - random_block_hash, random_block_number, random_bytes_32, random_serai_address, random_vec_of_len, -}; -use ciphersuite::{group::GroupEncoding, WrappedGroup}; -use dalek_ff_group::{Ristretto, RistrettoPoint}; use schnorr::SchnorrSignature; -use serai_primitives::address::SeraiAddress; - -use messages::sign::VariantSignId; - -use dkg::Participant; - use serai_db::{Db, DbTxn, MemDb}; - +use serai_primitives::test_helpers::{random_block_hash, random_block_number, random_vec_of_len}; use serai_cosign_types::CosignIntent; -use serai_coordinator_substrate::NewSetInformation; - use tributary_sdk::{ Block, BlockHeader, Transaction as TributaryTransaction, Evidence, tendermint::tx::TendermintTx, }; use crate::{ - CosignIntents, DkgConfirmationMessages, ProcessorMessages, ScanBlock, SubstrateBlockPlans, - db::{ - AccumulatedWeight, ActivelyCosigning, CosignIntents as DbCosignIntents, - LatestSubstrateBlockToCosign, Topic, TributaryDb, - }, - transaction::{SigningProtocolRound, Signed, Transaction}, - tests::{ - all_signed_transactions_with, assert_cosigning_invariants, MockP2p, default_test_validator_set, - expected_topic_after_start_cosigning, setup_test_validators_and_weights, - setup_test_validators_and_weights_with_keys, random_serai_address_and_key, new_test_set_info, - }, + CosignIntents, DkgConfirmationMessages, ProcessorMessages, RecognizedTopics, ScanBlock, + SubstrateBlockPlans, db::CosignIntents as DbCosignIntents, }; +use super::*; + fn new_scan_block<'a, TDT: DbTxn>( txn: &'a mut TDT, set_info: &'a NewSetInformation, @@ -70,7 +47,8 @@ fn new_signed(signer: RistrettoPoint) -> Signed { #[test] fn potentially_start_cosign() { - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let set = set_info.set; @@ -83,7 +61,7 @@ fn potentially_start_cosign() { let mut txn = db.txn(); TributaryDb::start_cosigning(&mut txn, set, initial_block_hash, OsRng.next_u64()); let new_block_hash = random_block_hash(&mut OsRng); - LatestSubstrateBlockToCosign::set(&mut txn, set, &new_block_hash); + TributaryDb::set_latest_substrate_block_to_cosign(&mut txn, set, new_block_hash); txn.commit(); } @@ -94,7 +72,7 @@ fn potentially_start_cosign() { } // Did not replace initial_block_hash for new_block_hash - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(initial_block_hash)); + assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(initial_block_hash)); } // No TributaryDb::latest_substrate_block_to_cosign block: no-op @@ -105,7 +83,7 @@ fn potentially_start_cosign() { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.potentially_start_cosign(); } - assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + assert!(TributaryDb::actively_cosigning(&mut txn, set).is_none()); } // Already cosigned: no-op @@ -115,7 +93,7 @@ fn potentially_start_cosign() { { let mut txn = db.txn(); - LatestSubstrateBlockToCosign::set(&mut txn, set, &initial_block_hash); + TributaryDb::set_latest_substrate_block_to_cosign(&mut txn, set, initial_block_hash); TributaryDb::mark_cosigned(&mut txn, set, initial_block_hash); txn.commit(); } @@ -126,7 +104,7 @@ fn potentially_start_cosign() { scan_block.potentially_start_cosign(); } - assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + assert!(TributaryDb::actively_cosigning(&mut txn, set).is_none()); } // Ready to cosign: starts cosigning and sends processor message @@ -144,7 +122,7 @@ fn potentially_start_cosign() { { let mut txn = db.txn(); - LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); + TributaryDb::set_latest_substrate_block_to_cosign(&mut txn, set, block_hash); CosignIntents::provide(&mut txn, set, &intent); txn.commit(); } @@ -167,7 +145,7 @@ fn potentially_start_cosign() { { let mut txn = db.txn(); - LatestSubstrateBlockToCosign::set(&mut txn, set, &block_hash); + TributaryDb::set_latest_substrate_block_to_cosign(&mut txn, set, block_hash); let new_block_hash = random_block_hash(&mut OsRng); DbCosignIntents::set( @@ -202,7 +180,8 @@ fn potentially_start_cosign() { #[test] fn accumulate_dkg_confirmation() { - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let (v1, v2, v3) = (validators[0], validators[1], validators[2]); let set_info = new_test_set_info(&validator_data); let set = set_info.set; @@ -289,7 +268,8 @@ mod handle_application_tx { #[test] fn dont_handle_signed_kind_from_fatally_slashed() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); @@ -301,11 +281,14 @@ mod handle_application_tx { txn.commit(); } - for tx in all_signed_transactions_with(Signed::default()) { + for tx in all_signed_transactions_and_attempts(Signed::default()) { let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx(random_block_number(&mut OsRng), tx.clone()); + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx(random_block_number(&mut OsRng), tx.clone()); + } assert!( ProcessorMessages::try_recv(&mut txn, set).is_none(), @@ -317,7 +300,8 @@ mod handle_application_tx { #[test] fn remove_participant() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); @@ -339,15 +323,11 @@ mod handle_application_tx { // Valid RemoveParticipant accumulates weight and eventually crosses threshold { - // All validators have real keys so they can sign - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); + let (keys_addrs, validator_data, validators, weights, _) = setup_n_validators_with_keys(3); let set_info = new_test_set_info(&validator_data); + let (key0, addr0) = keys_addrs[0]; + let (key1, _) = keys_addrs[1]; + let (key2, _) = keys_addrs[2]; let target = addr0; let block_number = random_block_number(&mut OsRng); @@ -364,7 +344,11 @@ mod handle_application_tx { ); } assert!( - TributaryDb::recognized(&mut txn, set, Topic::RemoveParticipant { participant: target }), + RecognizedTopics::recognized( + &mut txn, + set, + Topic::RemoveParticipant { participant: target } + ), "RemoveParticipant topic should be recognized after handling the tx" ); assert!( @@ -425,45 +409,40 @@ mod handle_application_tx { let set_info = new_test_set_info(&validator_data); let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); + let mut db = MemDb::new(); + let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); + // Below threshold: no DkgConfirmationMessages sent { - let mut db = MemDb::new(); - let mut txn = db.txn(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + block_number, Transaction::DkgConfirmationPreprocess { - attempt: OsRng.next_u32(), - preprocess: [1u8; 64], + attempt: 0, + preprocess: random_bytes_64(&mut OsRng), signed: new_signed(key0), }, ); - - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); } + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); // Threshold crossed: sends DkgConfirmationMessages (Preprocesses) { - let mut db = MemDb::new(); - let mut txn = db.txn(); - { - let mut scan_block = - new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { - scan_block.handle_application_tx( - 1, - Transaction::DkgConfirmationPreprocess { - attempt: 0, - preprocess, - signed: new_signed(key), - }, - ); - } + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for key in [key1, key2] { + scan_block.handle_application_tx( + block_number, + Transaction::DkgConfirmationPreprocess { + attempt: 0, + preprocess: random_bytes_64(&mut OsRng), + signed: new_signed(key), + }, + ); } - - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); } + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); } #[test] @@ -473,76 +452,95 @@ mod handle_application_tx { setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let (key0, addr0) = keys_addrs[0]; + let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); // Share without preceding preprocess participation -> fatal slash - // (the accumulate preceding_topic check slashes the signer) { let mut db = MemDb::new(); let mut txn = db.txn(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - 1, + random_block_number(&mut OsRng), Transaction::DkgConfirmationShare { attempt: 0, - share: [10u8; 32], + share: random_bytes_32(&mut OsRng), signed: new_signed(key0), }, ); - assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, addr0), + "share without preceding preprocess should fatally slash" + ); } - } - - /// Verify that the full preprocess->share flow works for DkgConfirmation. - /// - /// Previously, this panicked because `accumulate<[u8; 32]>` (share) used typed deserialization - /// on the preceding preprocess topic stored as `[u8; 64]`. Fixed by using a raw key-existence - /// check for the preceding topic instead. - #[test] - fn dkg_confirmation_preprocess_then_share_flow() { - let set = default_test_validator_set(); - let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_test_validators_and_weights_with_keys(); - let set_info = new_test_set_info(&validator_data); - let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); + // Full preprocess->share flow let mut db = MemDb::new(); let mut txn = db.txn(); + let block_number = random_block_number(&mut OsRng); // All 3 validators submit preprocesses (threshold crossed -> DkgConfirmationMessages sent) { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for (key, preprocess) in [(key0, [1u8; 64]), (key1, [2u8; 64]), (key2, [3u8; 64])] { + for key in [key0, key1, key2] { scan_block.handle_application_tx( - 1, + block_number, Transaction::DkgConfirmationPreprocess { attempt: 0, - preprocess, + preprocess: random_bytes_64(&mut OsRng), signed: new_signed(key), }, ); } } - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + assert!( + DkgConfirmationMessages::try_recv(&mut txn, set).is_some(), + "preprocesses crossing threshold should produce DkgConfirmationMessages" + ); + + // Below threshold: no DkgConfirmationMessages sent + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + block_number, + Transaction::DkgConfirmationShare { + attempt: 0, + share: random_bytes_32(&mut OsRng), + signed: new_signed(key0), + }, + ); + } + assert!( + DkgConfirmationMessages::try_recv(&mut txn, set).is_none(), + "single share should not produce DkgConfirmationMessages" + ); - // All 3 validators submit shares (threshold crossed -> DkgConfirmationMessages sent) + // Threshold crossed: sends DkgConfirmationMessages (Shares) { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for (key, share) in [(key0, [10u8; 32]), (key1, [20u8; 32]), (key2, [30u8; 32])] { + for key in [key1, key2] { scan_block.handle_application_tx( - 1, - Transaction::DkgConfirmationShare { attempt: 0, share, signed: new_signed(key) }, + block_number, + Transaction::DkgConfirmationShare { + attempt: 0, + share: random_bytes_32(&mut OsRng), + signed: new_signed(key), + }, ); } } - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); + assert!( + DkgConfirmationMessages::try_recv(&mut txn, set).is_some(), + "shares crossing threshold should produce DkgConfirmationMessages" + ); } #[test] fn cosign() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let block_hash = random_block_hash(&mut OsRng); @@ -567,10 +565,13 @@ mod handle_application_tx { let mut txn = db.txn(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx(1, Transaction::Cosign { substrate_block_hash: block_hash }); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Cosign { substrate_block_hash: block_hash }, + ); - assert_eq!(LatestSubstrateBlockToCosign::get(&mut txn, set), Some(block_hash)); - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); + assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&mut txn, set), Some(block_hash)); + assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(block_hash)); assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } @@ -582,39 +583,44 @@ mod handle_application_tx { { let mut txn = db.txn(); - TributaryDb::start_cosigning(&mut txn, set, first_hash, 1); + TributaryDb::start_cosigning(&mut txn, set, first_hash, OsRng.next_u64()); txn.commit(); } let mut txn = db.txn(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block - .handle_application_tx(1, Transaction::Cosign { substrate_block_hash: second_hash }); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Cosign { substrate_block_hash: second_hash }, + ); - assert_eq!(LatestSubstrateBlockToCosign::get(&mut txn, set), Some(second_hash)); - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(first_hash)); + assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&mut txn, set), Some(second_hash)); + assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(first_hash)); } } #[test] fn cosigned() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); // Marks block as cosigned { let mut db = MemDb::new(); - let block_hash = random_block_hash(&mut OsRng); let mut txn = db.txn(); + let block_hash = random_block_hash(&mut OsRng); - assert!(!TributaryDb::cosigned(&mut txn, set, block_hash)); - - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block - .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Cosigned { substrate_block_hash: block_hash }, + ); + } assert!(TributaryDb::cosigned(&mut txn, set, block_hash)); } @@ -626,19 +632,22 @@ mod handle_application_tx { { let mut txn = db.txn(); - TributaryDb::start_cosigning(&mut txn, set, block_hash, 1); + TributaryDb::start_cosigning(&mut txn, set, block_hash, OsRng.next_u64()); txn.commit(); } let mut txn = db.txn(); - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash)); + assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(block_hash)); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block - .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: block_hash }); - - assert!(ActivelyCosigning::get(&mut txn, set).is_none()); + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Cosigned { substrate_block_hash: block_hash }, + ); + } + assert!(TributaryDb::actively_cosigning(&mut txn, set).is_none()); } // Does not finish active cosign when block doesn't match @@ -649,17 +658,20 @@ mod handle_application_tx { { let mut txn = db.txn(); - TributaryDb::start_cosigning(&mut txn, set, active_hash, 1); + TributaryDb::start_cosigning(&mut txn, set, active_hash, OsRng.next_u64()); txn.commit(); } let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block - .handle_application_tx(1, Transaction::Cosigned { substrate_block_hash: other_hash }); - - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(active_hash)); + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Cosigned { substrate_block_hash: other_hash }, + ); + } + assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(active_hash)); assert!(TributaryDb::cosigned(&mut txn, set, other_hash)); } } @@ -667,12 +679,13 @@ mod handle_application_tx { #[test] fn substrate_block() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); - let plans = vec![[10u8; 32], [20u8; 32]]; + let plans = vec![random_bytes_32(&mut OsRng), random_bytes_32(&mut OsRng)]; { let mut txn = db.txn(); @@ -681,32 +694,390 @@ mod handle_application_tx { } let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_application_tx(1, Transaction::SubstrateBlock { hash: block_hash }); + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::SubstrateBlock { hash: block_hash }, + ); + } for plan in &plans { - let topic = expected_topic_after_start_cosigning(VariantSignId::Transaction(*plan)); - assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); + let topic = expected_initially_recognized_sign_topic(VariantSignId::Transaction(*plan)); + assert!(RecognizedTopics::recognized(&mut txn, set, topic)); } } #[test] fn batch() { let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); + let (_, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); - let batch_hash = [42u8; 32]; + let batch_hash = random_bytes_32(&mut OsRng); let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::Batch { hash: batch_hash }, + ); + } + + let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(batch_hash)); + assert!(RecognizedTopics::recognized(&mut txn, set, topic)); + } + + mod slash_report { + use super::*; + + #[test] + fn odd_slash_points() { + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_test_validators_and_weights_with_keys(); + let set_info = new_test_set_info(&validator_data); + let (key0, addr0) = keys_addrs[0]; + + // Wrong length: 3 validators but mismatched slash points -> fatal slash + for wrong_points in [ + vec![], + // 1 or 2 + vec![0; 1 + (OsRng.next_u32() as usize % 2)], + // 4..100 + vec![0; 4 + (OsRng.next_u32() as usize % 97)], + ] { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::SlashReport { + slash_points: wrong_points.clone(), + signed: new_signed(key0), + }, + ); + } + assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, addr0), + "expected fatal slash for slash_points length {}, but wasn't slashed", + wrong_points.len() + ); + } + + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(4); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // Valid length: accumulates weight + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::SlashReport { + slash_points: vec![0, 0, 0, 100], + signed: new_signed(keys_addrs[0].0), + }, + ); + } + assert!(RecognizedTopics::recognized(&mut txn, set, Topic::SlashReport)); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); + + // Threshold crossed: computes median slash report and sends SignSlashReport message + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); + for key in [keys_addrs[1].0, keys_addrs[2].0] { + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + // Each reporter says: first 3 validators have 0 points, 4th has 100 + Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: new_signed(key) }, + ); + } + } + let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + assert!(RecognizedTopics::recognized(&mut txn, set, sign_topic,)); + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + } + + /// Exercises the even-length median branch (`(this_validator.len() / 2) - 1`) in + /// the SlashReport handler by using 5 validators where `required_participation = 4` (even). + #[test] + fn even_slash_points() { + let set = default_test_validator_set(); + + // 5 validators of weight 1 -> required_participation = 5*2/3+1 = 4 + let (keys_addrs, validator_data, validators, weights, _) = setup_n_validators_with_keys(5); + let set_info = new_test_set_info(&validator_data); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // 4 reporters submit different opinions about validator 4 (index 4). + // Reports (for all 5 validator positions): + // reporter 0: [0, 0, 0, 0, 10] + // reporter 1: [0, 0, 0, 0, 20] + // reporter 2: [0, 0, 0, 0, 30] + // reporter 3: [0, 0, 0, 0, 40] + // + // Sorted values for validator 4: [10, 20, 30, 40] (len=4, even) + // Even median index: (4 / 2) - 1 = 1 -> median = 20 + // + // f = (5-1)/3 = 1, amortization baseline = sorted_medians[5-1-1] = sorted_medians[3] = 0 + // amortized: [0, 0, 0, 0, 20]. Non-zero entries: [20] for validator 4. + let slash_reports = vec![ + vec![0u32, 0, 0, 0, 10], + vec![0, 0, 0, 0, 20], + vec![0, 0, 0, 0, 30], + vec![0, 0, 0, 0, 40], + ]; + + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 5, &weights); + for (i, report) in slash_reports.iter().enumerate() { + let (key, _) = keys_addrs[i]; + scan_block.handle_application_tx( + random_block_number(&mut OsRng), + Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + ); + } + } + + // Threshold was crossed with 4 reporters (even) -> even median branch exercised. + // Verify the signing topic was recognized and a message was sent. + let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + assert!( + RecognizedTopics::recognized(&mut txn, set, sign_topic), + "SlashReport sign topic should be recognized" + ); + + let msg = ProcessorMessages::try_recv(&mut txn, set); + assert!(msg.is_some(), "expected SignSlashReport processor message"); + } + + mod fuzz_slash_report { + use super::*; + + /// Independently compute the expected slash report that `handle_application_tx` should + /// produce when `DataSet::Participating` is reached, mirroring the production logic. + /// + /// Returns `None` if `f == 0` (the slash report would be empty and nothing is sent). + fn expected_slash_report(num_validators: usize, reports: &[Vec]) -> Option> { + let f = (num_validators - 1) / 3; + if f == 0 { + return None; + } + + // Compute the median for each validator position across all reporters + let mut medians = Vec::with_capacity(num_validators); + for i in 0 .. num_validators { + let mut values: Vec = reports.iter().map(|r| r[i]).collect(); + values.sort_unstable(); + let median_index = + if (values.len() % 2) == 1 { values.len() / 2 } else { (values.len() / 2) - 1 }; + medians.push(values[median_index]); + } + + // Find worst validator in the supermajority and amortize + let mut sorted = medians.clone(); + sorted.sort_unstable(); + let amortization = sorted[num_validators - f - 1]; + + let amortized: Vec = medians.iter().map(|p| p.saturating_sub(amortization)).collect(); + + // Filter to non-zero entries only + let result: Vec = amortized.into_iter().filter(|&p| p > 0).collect(); + Some(result) + } + + /// Generate a random slash point value drawn from a weighted distribution: + /// ~30% zero, ~20% small (1..100), ~20% medium (100..10_000), ~30% fatal (u32::MAX). + /// The high fatal weight ensures the median across reporters frequently lands on u32::MAX, + /// exercising the Slash::Fatal branch after amortization. + fn random_slash_point(rng: &mut impl Rng) -> u32 { + match rng.gen_range(0u8 .. 10) { + 0 ..= 2 => 0, + 3 ..= 4 => rng.gen_range(1 .. 100), + 5 ..= 6 => rng.gen_range(100 .. 10_000), + _ => u32::MAX, + } + } + + /// Generate `count` slash report vectors, each of length `num_validators`. + fn random_slash_reports( + rng: &mut impl Rng, + num_validators: usize, + count: usize, + ) -> Vec> { + (0 .. count) + .map(|_| (0 .. num_validators).map(|_| random_slash_point(rng)).collect()) + .collect() + } + + /// Fuzz the SlashReport -> Participating path with randomized slash point vectors. + /// + /// Uses 4 validators (f=1) so the threshold-crossing path is reachable. + /// All 4 submit identical reports so the median equals the input. + #[test] + fn fuzz_slash_report_participating_4_validators() { + for _ in 0 .. 200 { + let set = default_test_validator_set(); + + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(4); + let set_info = new_test_set_info(&validator_data); + + let slash_points: Vec = (0 .. 4).map(|_| random_slash_point(&mut OsRng)).collect(); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + // All 4 validators submit the same slash_points + // required_participation = 4*2/3+1 = 3, so 3 cross the threshold. + // The 4th submission is a NOP (past threshold). + let reports: Vec> = vec![slash_points.clone(); 3]; + let expected = expected_slash_report(4, &reports); + + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for (key, _) in &keys_addrs { + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { + slash_points: slash_points.clone(), + signed: new_signed(*key), + }, + ); + } + } + + match expected { + Some(result) if !result.is_empty() => { + assert!( + ProcessorMessages::try_recv(&mut txn, set).is_some(), + "expected ProcessorMessage for non-empty slash report {result:?}", + ); + } + _ => { + // Empty or f==0 -> slash report is empty, nothing to sign. + // The handler always sends a message when Participating is reached + // (assert passes with len=0 <= f=1). + assert!( + ProcessorMessages::try_recv(&mut txn, set).is_some(), + "expected ProcessorMessage even for empty slash report", + ); + } + } + + let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + assert!( + RecognizedTopics::recognized(&mut txn, set, sign_topic), + "SlashReport sign topic should be recognized", + ); + } + } + + /// Fuzz with varying reporter opinions (not all identical). + /// Uses 7 validators (f=2) for a richer median calculation. + #[test] + fn fuzz_slash_report_diverse_opinions_7_validators() { + for _ in 0 .. 200 { + let set = default_test_validator_set(); + + // 7 validators, f = (7-1)/3 = 2 + let (keys_addrs, validator_data, validators, weights, _) = + setup_n_validators_with_keys(7); + let set_info = new_test_set_info(&validator_data); + + // required_participation = 7*2/3+1 = 5 + // We have 5 reports from 5 different validators to cross the threshold + let reports = random_slash_reports(&mut OsRng, 7, 5); + let expected = expected_slash_report(7, &reports[.. 5]); + + let mut db = MemDb::new(); + let mut txn = db.txn(); + + { + let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 7, &weights); + for (i, report) in reports.iter().enumerate() { + let (key, _) = keys_addrs[i]; + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + ); + } + } + + match expected { + Some(result) => { + assert!(result.len() <= 2, "slash report len {} should be <= f=2", result.len()); + } + None => { + unreachable!(); + } + } + + assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + assert!(RecognizedTopics::recognized(&mut txn, set, sign_topic)); + } + } + + /// Fuzz the wrong-length path: slash_points.len() != validators.len() -> fatal slash + #[test] + fn fuzz_slash_report_wrong_length() { + for _ in 0 .. 200 { + let num_validators = OsRng.gen_range(4usize .. 10); + let mut wrong_len = OsRng.gen_range(1usize .. 20); + if wrong_len == num_validators { + // Shift to guarantee mismatch + wrong_len = if wrong_len == 1 { 2 } else { wrong_len - 1 }; + } + + let set = default_test_validator_set(); + + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(num_validators); + let set_info = new_test_set_info(&validator_data); - scan_block.handle_application_tx(1, Transaction::Batch { hash: batch_hash }); + let mut db = MemDb::new(); + let mut txn = db.txn(); - let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); - assert!(AccumulatedWeight::get(&mut txn, set, topic).is_some()); + let (signer_key, signer_addr) = keys_addrs[0]; + + { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_application_tx( + 1, + Transaction::SlashReport { + slash_points: vec![0; wrong_len], + signed: new_signed(signer_key), + }, + ); + } + + assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), + "signer should be fatally slashed for wrong-length slash report", + ); + assert!( + ProcessorMessages::try_recv(&mut txn, set).is_none(), + "no message should be sent for wrong-length slash report", + ); + } + } + } } #[test] @@ -718,8 +1089,8 @@ mod handle_application_tx { let (key0, addr0) = keys_addrs[0]; let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); - let sign_id = VariantSignId::Transaction([42; 32]); - let topic = expected_topic_after_start_cosigning(sign_id); + let sign_id = VariantSignId::Transaction(random_bytes_32(&mut OsRng)); + let topic = expected_initially_recognized_sign_topic(sign_id); // Wrong data length: signer has weight 1 but submits 2 entries -> fatal slash { @@ -729,7 +1100,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - 1, + random_block_number(&mut OsRng), Transaction::Sign { id: sign_id, attempt: 0, @@ -753,7 +1124,7 @@ mod handle_application_tx { new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( - 1, + random_block_number(&mut OsRng), Transaction::Sign { id: sign_id, attempt: 0, @@ -780,8 +1151,8 @@ mod handle_application_tx { let set_info = new_test_set_info(&validator_data); let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); - let sign_id = VariantSignId::Transaction([42; 32]); - let preprocess_topic = expected_topic_after_start_cosigning(sign_id); + let sign_id = VariantSignId::Transaction(random_bytes_32(&mut OsRng)); + let preprocess_topic = expected_initially_recognized_sign_topic(sign_id); let share_topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Share }; let mut db = MemDb::new(); @@ -793,10 +1164,11 @@ mod handle_application_tx { // Step 1: All validators submit preprocesses, crossing threshold. // This auto-recognizes the Share topic (succeeding_topic) and stores preprocess data. { + let block_number = random_block_number(&mut OsRng); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( - 1, + block_number, Transaction::Sign { id: sign_id, attempt: 0, @@ -812,14 +1184,15 @@ mod handle_application_tx { assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); // Share topic should now be recognized - assert!(AccumulatedWeight::get(&mut txn, set, share_topic).is_some()); + assert!(RecognizedTopics::recognized(&mut txn, set, share_topic)); // Step 2: All validators submit shares, crossing threshold -> sends Shares message. { + let block_number = random_block_number(&mut OsRng); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( - 2, + block_number, Transaction::Sign { id: sign_id, attempt: 0, @@ -840,604 +1213,202 @@ mod handle_application_tx { assert!(!TributaryDb::is_fatally_slashed(&mut txn, set, *v)); } } +} + +mod handle_block { + use super::*; #[test] - fn slash_report() { + fn handles_all_transaction_types() { let set = default_test_validator_set(); let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_test_validators_and_weights_with_keys(); + setup_n_validators_with_keys(3); let set_info = new_test_set_info(&validator_data); - let (key0, addr0) = keys_addrs[0]; + let addr0 = validator_data[0].0; + let signed = new_signed(keys_addrs[0].0); - // Wrong length: 3 validators but only 2 slash points -> fatal slash + // Empty block only calls start of block { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { slash_points: vec![0, 0], signed: new_signed(key0) }, - ); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![], + }; - assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); + } + assert_no_pending_messages(&mut txn, set); } - // Valid length: accumulates weight - { + // Each application transaction type passes through handle_block. + // Signed transactions use a real validator key so participant_indexes lookups succeed. + // Cosign and SubstrateBlock need external state populated before they can run. + for tx in all_signed_transactions_and_attempts(signed) { let mut db = MemDb::new(); let mut txn = db.txn(); - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { slash_points: vec![0, 0, 0], signed: new_signed(key0) }, - ); + let block_txs = vec![TributaryTransaction::Application(tx)]; + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: block_txs.clone(), + }; - assert!(AccumulatedWeight::get(&mut txn, set, Topic::SlashReport).is_some()); + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); + } + assert_block_side_effects(&mut txn, set, &block_txs); } - // Threshold crossed: computes median slash report and sends SignSlashReport message. - // Uses 4 validators so f = (4-1)/3 = 1, allowing up to 1 slashed validator. - { + // Provided transactions that need preconditions + for tx in all_provided_transactions() { let mut db = MemDb::new(); let mut txn = db.txn(); - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - let (_, addr3) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - // Each reporter says: first 3 validators have 0 points, 4th has 100 - // required_participation = 4*2/3+1 = 3, so 3 submissions cross the threshold - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); - for key in [key0, key1, key2] { - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: new_signed(key) }, + // Set up required external state + match &tx { + Transaction::Cosign { substrate_block_hash } => { + CosignIntents::provide( + &mut txn, + set, + &CosignIntent { + global_session: random_bytes_32(&mut OsRng), + block_number: random_block_number(&mut OsRng), + block_hash: *substrate_block_hash, + notable: false, + }, ); } + Transaction::SubstrateBlock { hash } => { + let plans = vec![random_bytes_32(&mut OsRng)]; + SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); + } + _ => {} } - assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - } - } - - /// Exercises the even-length median branch (`(this_validator.len() / 2) - 1`) in - /// the SlashReport handler by using 5 validators where `required_participation = 4` (even). - #[test] - fn slash_report_even_reporter_count_median() { - let set = default_test_validator_set(); - - // 5 validators of weight 1 -> required_participation = 5*2/3+1 = 4 - let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. 5).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); - let validator_data: Vec<(SeraiAddress, u16)> = - keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - let mut db = MemDb::new(); - let mut txn = db.txn(); - - // 4 reporters submit different opinions about validator 4 (index 4). - // Reports (for all 5 validator positions): - // reporter 0: [0, 0, 0, 0, 10] - // reporter 1: [0, 0, 0, 0, 20] - // reporter 2: [0, 0, 0, 0, 30] - // reporter 3: [0, 0, 0, 0, 40] - // - // Sorted values for validator 4: [10, 20, 30, 40] (len=4, even) - // Even median index: (4 / 2) - 1 = 1 -> median = 20 - // - // f = (5-1)/3 = 1, amortization baseline = sorted_medians[5-1-1] = sorted_medians[3] = 0 - // amortized: [0, 0, 0, 0, 20]. Non-zero entries: [20] for validator 4. - let slash_reports = vec![ - vec![0u32, 0, 0, 0, 10], - vec![0, 0, 0, 0, 20], - vec![0, 0, 0, 0, 30], - vec![0, 0, 0, 0, 40], - ]; - - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 5, &weights); - for (i, report) in slash_reports.iter().enumerate() { - let (key, _) = keys_addrs[i]; - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, - ); - } - } - - // Threshold was crossed with 4 reporters (even) -> even median branch exercised. - // Verify the signing topic was recognized and a message was sent. - let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); - assert!( - AccumulatedWeight::get(&mut txn, set, sign_topic).is_some(), - "SlashReport sign topic should be recognized" - ); - - let msg = ProcessorMessages::try_recv(&mut txn, set); - assert!(msg.is_some(), "expected SignSlashReport processor message"); - } - - mod fuzz_slash_report { - use super::*; - use proptest::prelude::*; - - /// Independently compute the expected slash report that `handle_application_tx` should - /// produce when `DataSet::Participating` is reached, mirroring the production logic. - /// - /// Returns `None` if `f == 0` (the slash report would be empty and nothing is sent). - fn expected_slash_report(num_validators: usize, reports: &[Vec]) -> Option> { - let f = (num_validators - 1) / 3; - if f == 0 { - return None; - } + let block_txs = vec![TributaryTransaction::Application(tx)]; + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: block_txs.clone(), + }; - // Compute the median for each validator position across all reporters - let mut medians = Vec::with_capacity(num_validators); - for i in 0 .. num_validators { - let mut values: Vec = reports.iter().map(|r| r[i]).collect(); - values.sort_unstable(); - let median_index = - if (values.len() % 2) == 1 { values.len() / 2 } else { (values.len() / 2) - 1 }; - medians.push(values[median_index]); + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); } - - // Find worst validator in the supermajority and amortize - let mut sorted = medians.clone(); - sorted.sort_unstable(); - let amortization = sorted[num_validators - f - 1]; - - let amortized: Vec = medians.iter().map(|p| p.saturating_sub(amortization)).collect(); - - // Filter to non-zero entries only - let result: Vec = amortized.into_iter().filter(|&p| p > 0).collect(); - Some(result) + assert_block_side_effects(&mut txn, set, &block_txs); } - /// Generate `count` slash report vectors, each of length `num_validators`. - /// Values are drawn from a small set including 0, small values, large values, and u32::MAX - /// to exercise the Fatal/Points/zero filtering paths. - fn slash_points_strategy( - num_validators: usize, - count: usize, - ) -> impl Strategy>> { - let values = prop::collection::vec( - prop_oneof![ - 3 => Just(0u32), - 3 => 1..100u32, - 2 => 100..10_000u32, - 1 => Just(u32::MAX), - ], - num_validators, - ); - prop::collection::vec(values, count) - } - - proptest! { - #![proptest_config(ProptestConfig::with_cases(200))] - - /// Fuzz the SlashReport -> Participating path with randomized slash point vectors. - /// - /// Uses 4 validators (f=1) so the threshold-crossing path is reachable. - /// All 4 submit identical reports so the median equals the input. - #[test] - fn fuzz_slash_report_participating_4_validators( - slash_points in slash_points_strategy(4, 1).prop_map(|mut v| v.remove(0)), - ) { - let set = default_test_validator_set(); - - let (key0, addr0) = random_serai_address_and_key(&mut OsRng); - let (key1, addr1) = random_serai_address_and_key(&mut OsRng); - let (key2, addr2) = random_serai_address_and_key(&mut OsRng); - let (key3, addr3) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1), (addr2, 1), (addr3, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - + // Each Tendermint SlashEvidence type fatally slashes the sender + { + let all_evidence = [ + Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), + Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), + Evidence::ConflictingMessages( + make_signed_message_bytes(addr0.0), + make_signed_message_bytes(addr0.0), + ), + ]; + + for evidence in all_evidence { let mut db = MemDb::new(); let mut txn = db.txn(); - // All 4 validators submit the same slash_points - // required_participation = 4*2/3+1 = 3, so 3 cross the threshold. - // The 4th submission is a NOP (past threshold). - let reports: Vec> = vec![slash_points.clone(); 3]; - let expected = expected_slash_report(4, &reports); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( + evidence, + ))], + }; { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); - for (key, _) in [(key0, &addr0), (key1, &addr1), (key2, &addr2), (key3, &addr3)] { - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { - slash_points: slash_points.clone(), - signed: new_signed(key), - }, - ); - } + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(1, block); } - - match expected { - Some(result) if !result.is_empty() => { - // Non-empty slash report -> message should be sent - prop_assert!( - ProcessorMessages::try_recv(&mut txn, set).is_some(), - "expected ProcessorMessage for non-empty slash report {:?}", - result - ); - } - _ => { - // Empty or f==0 -> no message sent (slash report is empty, nothing to sign) - // The code still sends the message even for empty reports due to the assert - // passing with len=0 <= f. Verify it gets sent regardless. - // - // With our fix, Points(0) are filtered, so if all amortized values are 0, - // the slash_report is empty. The assert passes (0 <= f=1), and the code still - // recognizes the topic and sends the message. - let msg = ProcessorMessages::try_recv(&mut txn, set); - // The handler always sends a message when Participating is reached - prop_assert!(msg.is_some(), "expected ProcessorMessage even for empty slash report"); - } - } - - // Verify the SlashReport signing topic was recognized - let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); - prop_assert!( - AccumulatedWeight::get(&mut txn, set, sign_topic).is_some(), - "SlashReport sign topic should be recognized" + assert!( + TributaryDb::is_fatally_slashed(&txn, set, addr0), + "SlashEvidence should fatally slash the sender", ); - } - - /// Fuzz with varying reporter opinions (not all identical). - /// Uses 7 validators (f=2) for a richer median calculation. - #[test] - fn fuzz_slash_report_diverse_opinions_7_validators( - reports in slash_points_strategy(7, 5), - ) { - let set = default_test_validator_set(); - - // 7 validators, f = (7-1)/3 = 2 - let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. 7).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); - let validator_data: Vec<(SeraiAddress, u16)> = - keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - // required_participation = 7*2/3+1 = 5 - // We have 5 reports from 5 different validators to cross the threshold - let expected = expected_slash_report(7, &reports[..5]); - let mut db = MemDb::new(); - let mut txn = db.txn(); + assert_no_pending_messages(&mut txn, set); + txn.commit(); + } + } - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 7, &weights); - for (i, report) in reports.iter().enumerate() { - let (key, _) = keys_addrs[i]; - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { - slash_points: report.clone(), - signed: new_signed(key), - }, - ); - } - } + // Fuzz mixed blocks with random quantities, types, and ordering + for _ in 0 .. 100 { + let mut db = MemDb::new(); + let mut txn = db.txn(); - // Verify the expected result - match expected { - Some(result) => { - prop_assert!(result.len() <= 2, "slash report len {} should be <= f=2", result.len()); - } - None => { - // f == 0, which can't happen with 7 validators - unreachable!(); - } + let num_txs = OsRng.gen_range(1usize ..= 8); + let mut transactions = Vec::with_capacity(num_txs); + let mut has_evidence = false; + let mut batch_hashes = vec![]; + + for _ in 0 .. num_txs { + if OsRng.gen_bool(0.5) { + // Random Tendermint evidence type + let evidence = match OsRng.gen_range(0u8 .. 3) { + 0 => Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), + 1 => Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), + _ => Evidence::ConflictingMessages( + make_signed_message_bytes(addr0.0), + make_signed_message_bytes(addr0.0), + ), + }; + transactions + .push(TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(evidence))); + has_evidence = true; + } else { + // Random application transaction, use Batch so we can assert recognition + let hash = random_bytes_32(&mut OsRng); + batch_hashes.push(hash); + transactions.push(TributaryTransaction::Application(Transaction::Batch { hash })); } - - // Participating path was reached -> message and topic recognition - prop_assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - let sign_topic = expected_topic_after_start_cosigning(VariantSignId::SlashReport); - prop_assert!(AccumulatedWeight::get(&mut txn, set, sign_topic).is_some()); } - /// Fuzz the wrong-length path: slash_points.len() != validators.len() -> fatal slash - #[test] - fn fuzz_slash_report_wrong_length( - num_validators in 4usize..10, - wrong_len in 1usize..20, - ) { - prop_assume!(wrong_len != num_validators); - - let set = default_test_validator_set(); - - let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = - (0 .. num_validators).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); - let validator_data: Vec<(SeraiAddress, u16)> = - keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - let total_weight = num_validators as u16; - - let mut db = MemDb::new(); - let mut txn = db.txn(); - - let (signer_key, signer_addr) = keys_addrs[0]; + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: transactions.clone(), + }; - { - let mut scan_block = - new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { - slash_points: vec![0; wrong_len], - signed: new_signed(signer_key), - }, - ); - } + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); + } - prop_assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), - "signer should be fatally slashed for wrong-length slash report" + if has_evidence { + assert!( + TributaryDb::is_fatally_slashed(&txn, set, addr0), + "SlashEvidence should fatally slash the sender in mixed blocks", ); - prop_assert!( - ProcessorMessages::try_recv(&mut txn, set).is_none(), - "no message should be sent for wrong-length slash report" + } + for hash in &batch_hashes { + let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(*hash)); + assert!( + RecognizedTopics::recognized(&txn, set, topic), + "Batch should be recognized regardless of other txs in the block", ); } + assert_block_side_effects(&mut txn, set, &transactions); } } } - -mod handle_block { - use super::*; - - #[test] - fn processes_application_transactions() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let batch_hash = random_bytes_32(&mut OsRng); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); - let set_info = new_test_set_info(&validator_data); - - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![TributaryTransaction::Application(Transaction::Batch { - hash: batch_hash, - })], - }; - - { - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_block(1, block); - txn.commit(); - } - - let expected_topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); - assert!(TributaryDb::recognized(&db, set, expected_topic)); - } - - #[test] - fn empty_block_only_calls_start_of_block() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); - let set_info = new_test_set_info(&validator_data); - - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_block(1, block); - txn.commit(); - - // No messages, no state changes beyond start_of_block - let mut txn = db.txn(); - assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); - } - - #[test] - fn multiple_application_txs_in_one_block() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let batch_hash_a = [10; 32]; - let batch_hash_b = [20; 32]; - let (validator_data, validators, weights, total_weight) = setup_test_validators_and_weights(); - let set_info = new_test_set_info(&validator_data); - - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![ - TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_a }), - TributaryTransaction::Application(Transaction::Batch { hash: batch_hash_b }), - ], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_block(1, block); - txn.commit(); - - for hash in [batch_hash_a, batch_hash_b] { - let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(hash)); - assert!(TributaryDb::recognized(&db, set, topic)); - } - } - - /// Construct a borsh-encoded `SignedMessage` for `TendermintNetwork`. - /// - /// The network's types are: ValidatorId = [u8; 32], Block = TendermintBlock, Signature = [u8; 64]. - /// We manually build the borsh encoding rather than depending on the internal tendermint types. - fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { - let mut bytes = Vec::new(); - // Message fields: - bytes.extend_from_slice(&sender); // sender: [u8; 32] - bytes.extend_from_slice(&0u64.to_le_bytes()); // block: BlockNumber(0) - bytes.extend_from_slice(&0u32.to_le_bytes()); // round: RoundNumber(0) - bytes.push(1); // Data::Prevote variant index - bytes.push(0); // Option::None (no block id) - // Signature: - bytes.extend_from_slice(&[0u8; 64]); // sig: [u8; 64] - bytes - } - - #[test] - fn slash_evidence_invalid_precommit() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let (_, addr0) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - let evidence_bytes = make_signed_message_bytes(addr0.0); - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( - Evidence::InvalidPrecommit(evidence_bytes), - ))], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); - scan_block.handle_block(1, block); - txn.commit(); - - assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); - } - - #[test] - fn slash_evidence_invalid_valid_round() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let (_, addr0) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - let evidence_bytes = make_signed_message_bytes(addr0.0); - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( - Evidence::InvalidValidRound(evidence_bytes), - ))], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); - scan_block.handle_block(1, block); - txn.commit(); - - assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); - } - - #[test] - fn slash_evidence_conflicting_messages() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let (_, addr0) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - // Both messages have the same sender; the slash uses the first message's sender - let first = make_signed_message_bytes(addr0.0); - let second = make_signed_message_bytes(addr0.0); - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( - Evidence::ConflictingMessages(first, second), - ))], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, 1, &weights); - scan_block.handle_block(1, block); - txn.commit(); - - assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); - } - - /// Verifies handle_block processes both Tendermint and Application transactions in one block. - #[test] - fn mixed_tendermint_and_application_txs() { - let mut db = MemDb::new(); - let set = default_test_validator_set(); - let (_, addr0) = random_serai_address_and_key(&mut OsRng); - let (_, addr1) = random_serai_address_and_key(&mut OsRng); - let validator_data = vec![(addr0, 1u16), (addr1, 1)]; - let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); - let weights: HashMap = validator_data.iter().copied().collect(); - let set_info = new_test_set_info(&validator_data); - - let batch_hash = [99; 32]; - let evidence_bytes = make_signed_message_bytes(addr0.0); - - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![ - // Tendermint SlashEvidence first - TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit( - evidence_bytes, - ))), - // Then an Application transaction - TributaryTransaction::Application(Transaction::Batch { hash: batch_hash }), - ], - }; - - let mut txn = db.txn(); - let scan_block = new_scan_block(&mut txn, &set_info, &validators, 2, &weights); - scan_block.handle_block(1, block); - txn.commit(); - - // Tendermint evidence slashed addr0 - assert!(TributaryDb::is_fatally_slashed(&db, set, addr0)); - // Application tx was still processed - let topic = expected_topic_after_start_cosigning(VariantSignId::Batch(batch_hash)); - assert!(TributaryDb::recognized(&db, set, topic)); - } -} diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs new file mode 100644 index 000000000..0e632f1cb --- /dev/null +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -0,0 +1,229 @@ +use blake2::{Digest as _, Blake2s256}; + +use serai_primitives::test_helpers::random_genesis; +use serai_task::test_helpers::TaskTest; +use tributary_sdk::{ + Tributary, ReadWrite as _, Block, BlockHeader, Transaction as TributaryTransaction, Evidence, + tendermint::tx::TendermintTx, +}; +use super::*; + +/// Create a Tributary with a single validator. Returns the Tributary (kept alive so +/// the Tendermint machine keeps running) and the validator's signing key. +async fn make_tributary( + db: MemDb, +) -> (Tributary, Zeroizing<::F>, [u8; 32]) { + let key = random_key(&mut OsRng); + let pub_key = get_key_point(key.clone()); + let genesis = random_genesis(&mut OsRng); + let tributary = Tributary::::new( + db, + genesis, + // Use a past start_time so TendermintMachine::new doesn't sleep waiting for block end time + 1, + key.clone(), + vec![(pub_key, 1)], + MockP2p, + ) + .await + .expect("Tributary::new returned None"); + (tributary, key, genesis) +} + +#[tokio::test] +async fn new_scan_tributary_task() { + let db = MemDb::new(); + let (tributary, _, _) = make_tributary(db.clone()).await; + + // Single validator with weight > 1 + { + let (_, addr) = random_serai_address_and_key(&mut OsRng); + let set_info = new_test_set_info(&[(addr, 3)]); + let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); + + assert_eq!(task.validators.len(), 1); + assert_eq!(task.validators[0], addr); + assert_eq!(task.total_weight, 3); + assert_eq!(*task.validator_weights.get(&addr).unwrap(), 3); + } + + // Multiple validators with different weights + { + let (_, addr1) = random_serai_address_and_key(&mut OsRng); + let (_, addr2) = random_serai_address_and_key(&mut OsRng); + let (_, addr3) = random_serai_address_and_key(&mut OsRng); + let set_info = new_test_set_info(&[(addr1, 1), (addr2, 2), (addr3, 4)]); + let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); + + assert_eq!(task.validators.len(), 3); + assert_eq!(task.total_weight, 7); + assert_eq!(*task.validator_weights.get(&addr1).unwrap(), 1); + assert_eq!(*task.validator_weights.get(&addr2).unwrap(), 2); + assert_eq!(*task.validator_weights.get(&addr3).unwrap(), 4); + } + + // Preserves set info + { + let (_, addr) = random_serai_address_and_key(&mut OsRng); + let set_info = new_test_set_info(&[(addr, 1)]); + let expected_set = set_info.set; + let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); + + assert_eq!(task.set.set, expected_set); + } +} + +/// Wait until `block_after(parent)` returns `Some`, with a 30s timeout. +async fn wait_for_block_after( + tributary: &Tributary, + parent: &[u8; 32], +) -> [u8; 32] { + let reader = tributary.reader(); + let start = std::time::Instant::now(); + loop { + if let Some(hash) = reader.block_after(parent) { + return hash; + } + if start.elapsed() > std::time::Duration::from_secs(30) { + panic!("timed out waiting for a block after {:?}", parent); + } + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + } +} + +/// Write a fake block into the DB so the TributaryReader can find it. +/// Returns the block's hash. +fn inject_block( + mut txn: impl DbTxn, + genesis: [u8; 32], + parent: [u8; 32], + transactions: Vec>, +) -> [u8; 32] { + let tx_hashes: Vec<[u8; 32]> = transactions.iter().map(|tx| tx.hash()).collect(); + let txs_hash = + Blake2s256::digest(tx_hashes.iter().flat_map(|h| h.iter().copied()).collect::>()).into(); + let block = Block { header: BlockHeader { parent, transactions: txs_hash }, transactions }; + let block_hash = block.hash(); + let serialized = block.serialize(); + + let block_after_key = MemDb::key( + b"tributary_blockchain", + b"block_after", + [genesis.as_ref(), parent.as_ref()].concat(), + ); + let block_key = + MemDb::key(b"tributary_blockchain", b"block", [genesis.as_ref(), block_hash.as_ref()].concat()); + + txn.put(block_after_key, block_hash); + txn.put(block_key, serialized); + txn.commit(); + + block_hash +} + +#[tokio::test(flavor = "multi_thread")] +async fn scan_tributary_task_run_iteration() { + let (_, addr) = random_serai_address_and_key(&mut OsRng); + let set_info = new_test_set_info(&[(addr, 1)]); + + // No blocks committed yet: returns false + { + let db = MemDb::new(); + let (tributary, _, _) = make_tributary(db.clone()).await; + + let mut task = + ScanTributaryTask::::new(db, set_info.clone(), tributary.reader()); + TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; + } + + let mut db = MemDb::new(); + let (tributary, _, genesis) = make_tributary(db.clone()).await; + + // Wait for at least one real committed block + wait_for_block_after(&tributary, &genesis).await; + + // Create one task that persists across the remaining steps so each run_iteration + // continues from where the previous one left off. + let mut task = + ScanTributaryTask::::new(db.clone(), set_info.clone(), tributary.reader()); + + // Processes committed block(s) and records progress + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + + let (last_handled_block_number, last_handled_block_hash) = + TributaryDb::last_handled_tributary_block(&db, default_test_validator_set()).unwrap(); + assert!(last_handled_block_number >= 1, "expected at least block 1 to be handled"); + + // Processes block with provided and signed txs - inject after the actual last handled block + { + let batch_tx = + TributaryTransaction::Application(Transaction::Batch { hash: random_bytes_32(&mut OsRng) }); + let fake_evidence = TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( + Evidence::InvalidPrecommit(make_signed_message_bytes(addr.0)), + )); + let block_txs = vec![fake_evidence, batch_tx]; + + let local_qty_key = + MemDb::key(b"tributary_provided", b"local_quantity", [genesis.as_ref(), b"Batch"].concat()); + let block_hash = inject_block(db.txn(), genesis, last_handled_block_hash, block_txs.clone()); + let block_qty_key = MemDb::key( + b"tributary_provided", + b"block_quantity", + [genesis.as_ref(), block_hash.as_ref(), b"Batch"].concat(), + ); + { + let mut txn = db.txn(); + txn.put(&local_qty_key, 1u32.to_le_bytes()); + txn.put(block_qty_key, 1u32.to_le_bytes()); + txn.commit(); + } + + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + + let mut txn = db.txn(); + assert_block_side_effects(&mut txn, default_test_validator_set(), &block_txs); + } + + // Errors when locally provided txs are missing + { + let mut db2 = MemDb::new(); + let (tributary2, _, genesis2) = make_tributary(db2.clone()).await; + + let cosign_tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; + tributary2.provide_transaction(cosign_tx).await.unwrap(); + + // Wait for a block that includes the provided transaction + let reader = tributary2.reader(); + let mut parent = genesis2; + let start = std::time::Instant::now(); + loop { + if start.elapsed() > std::time::Duration::from_secs(30) { + panic!("timed out waiting for a block with the provided tx"); + } + if let Some(hash) = reader.block_after(&parent) { + let block = reader.block(&hash).unwrap(); + if block + .transactions + .iter() + .any(|tx| matches!(tx.kind(), tributary_sdk::transaction::TransactionKind::Provided(_))) + { + break; + } + parent = hash; + } else { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + } + + // Delete the locally_provided_quantity to trigger the error + let local_qty_key = + MemDb::key(b"tributary_provided", b"local_quantity", [genesis2.as_ref(), b"Cosign"].concat()); + let mut txn = db2.txn(); + txn.del(local_qty_key); + txn.commit(); + + let set_info = new_test_set_info(&[(addr, 1)]); + let mut task = ScanTributaryTask::::new(db2, set_info, reader); + TaskTest::task_runs_and_fails_with(&mut task, "didn't have the provided Transactions").await; + } +} diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 76d19b706..73d570d89 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -3,7 +3,7 @@ use std::io::{self, Cursor, Read, Write}; use blake2::{digest::typenum::U32, Digest as _, Blake2b}; use borsh::{BorshDeserialize, BorshSerialize}; -use rand::{CryptoRng, Rng, RngCore, rngs::OsRng}; +use rand::{RngCore, rngs::OsRng}; use ciphersuite::{ group::{Group as _, GroupEncoding, ff::PrimeField}, @@ -18,126 +18,7 @@ use tributary_sdk::{ transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, }; -use crate::{ - db::Topic, - tests::{random_key, random_signed}, - transaction::{Signed, SigningProtocolRound, Transaction}, -}; - -/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u32::MAX. -fn all_signed_transactions_and_attempts() -> Vec { - let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); - vec![ - // RemoveParticipant - Transaction::RemoveParticipant { - participant: random_serai_address(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - // DkgParticipation - Transaction::DkgParticipation { - participation: random_vec_u8(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - // DkgConfirmationPreprocess - Transaction::DkgConfirmationPreprocess { - attempt: 0, - preprocess: random_bytes_64(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - Transaction::DkgConfirmationPreprocess { - attempt: random_attempt, - preprocess: random_bytes_64(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - Transaction::DkgConfirmationPreprocess { - attempt: u32::MAX, - preprocess: random_bytes_64(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - // DkgConfirmationShare - Transaction::DkgConfirmationShare { - attempt: 0, - share: random_bytes_32(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - Transaction::DkgConfirmationShare { - attempt: random_attempt, - share: random_bytes_32(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - Transaction::DkgConfirmationShare { - attempt: u32::MAX, - share: random_bytes_32(&mut OsRng), - signed: random_signed(&mut OsRng), - }, - // Sign Preprocess - Transaction::Sign { - id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), - attempt: 0, - round: SigningProtocolRound::Preprocess, - data: vec![random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - Transaction::Sign { - id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), - attempt: random_attempt, - round: SigningProtocolRound::Preprocess, - data: vec![random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - Transaction::Sign { - id: VariantSignId::Transaction(random_bytes_32(&mut OsRng)), - attempt: u32::MAX, - round: SigningProtocolRound::Preprocess, - data: vec![random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - // Sign Share - Transaction::Sign { - id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), - attempt: 0, - round: SigningProtocolRound::Share, - data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - Transaction::Sign { - id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), - attempt: random_attempt, - round: SigningProtocolRound::Share, - data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - Transaction::Sign { - id: VariantSignId::Batch(random_bytes_32(&mut OsRng)), - attempt: u32::MAX, - round: SigningProtocolRound::Share, - data: vec![random_vec_u8(&mut OsRng), random_vec_u8(&mut OsRng)], - signed: random_signed(&mut OsRng), - }, - // SlashReport - Transaction::SlashReport { - slash_points: (0 .. 3).map(|_| OsRng.next_u32()).collect(), - signed: random_signed(&mut OsRng), - }, - ] -} - -/// One of each provided transaction kind. -fn all_provided_transactions() -> Vec { - vec![ - Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }, - Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }, - Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }, - Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }, - ] -} - -/// One of each of all transaction kinds. -fn all_transactions() -> Vec { - let mut txs = all_signed_transactions_and_attempts(); - txs.extend(all_provided_transactions()); - txs -} +use super::*; fn all_signing_protocol_rounds() -> Vec { vec![SigningProtocolRound::Preprocess, SigningProtocolRound::Share] @@ -467,7 +348,7 @@ mod transaction { out } - for mut tx in all_signed_transactions_and_attempts() { + for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); let (expected_order, expected_nonce) = match &tx { @@ -709,7 +590,7 @@ mod transaction { let genesis = random_genesis(&mut OsRng); // Sets correct signer and produces verifiable signature - for mut tx in all_signed_transactions_and_attempts() { + for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); let sig_hash = tx.sig_hash(genesis); diff --git a/coordinator/tributary/src/tests/tributary.rs b/coordinator/tributary/src/tests/tributary.rs new file mode 100644 index 000000000..c2ee26bcf --- /dev/null +++ b/coordinator/tributary/src/tests/tributary.rs @@ -0,0 +1,78 @@ +use serai_db::{Db, DbTxn, MemDb}; +use crate::*; +use super::*; + +/// Helper to extract slash_points from a SlashReport transaction. +fn unwrap_slash_report(tx: Transaction) -> (Vec, Signed) { + match tx { + Transaction::SlashReport { slash_points, signed } => (slash_points, signed), + other => panic!("expected SlashReport, got {other:?}"), + } +} + +#[test] +fn slash_report() { + let set = default_test_validator_set(); + + // No slash points set: all zeros + { + let db = MemDb::new(); + let validators = vec![ + (random_serai_address(&mut OsRng), 1), + (random_serai_address(&mut OsRng), 1), + (random_serai_address(&mut OsRng), 1), + ]; + let set_info = new_test_set_info(&validators); + + let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); + assert_eq!(points, vec![0, 0, 0]); + assert_eq!(signed, Signed::default()); + } + + // Respects validator order + { + let mut db = MemDb::new(); + let (v1, v2, v3, v4) = ( + random_serai_address(&mut OsRng), + random_serai_address(&mut OsRng), + random_serai_address(&mut OsRng), + random_serai_address(&mut OsRng), + ); + let set_info = new_test_set_info(&[(v1, 1), (v2, 1), (v3, 1), (v4, 1)]); + + let (slash1, slash2, slash3, slash4) = + (OsRng.next_u32(), OsRng.next_u32(), OsRng.next_u32(), OsRng.next_u32()); + + { + let mut txn = db.txn(); + SlashPoints::set(&mut txn, set, v1, &slash1); + // SlashPoints sets validator 3 before 2 here, + // but this order doesn't affect the validators order of set_info + SlashPoints::set(&mut txn, set, v3, &slash3); + SlashPoints::set(&mut txn, set, v2, &slash2); + SlashPoints::set(&mut txn, set, v4, &slash4); + txn.commit(); + } + + let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); + assert_eq!(points, vec![slash1, slash2, slash3, slash4]); + assert_eq!(signed, Signed::default()); + } + + // Fatal slash yields u32::MAX + { + let mut db = MemDb::new(); + let (v1, v2) = (random_serai_address(&mut OsRng), random_serai_address(&mut OsRng)); + let set_info = new_test_set_info(&[(v1, 1), (v2, 1)]); + + { + let mut txn = db.txn(); + TributaryDb::fatal_slash(&mut txn, set, v1, "test reason"); + txn.commit(); + } + + let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); + assert_eq!(points, vec![u32::MAX, 0]); + assert_eq!(signed, Signed::default()); + } +} From e073057e98243f56e43a7d8bed618ee6c64c97aa Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Tue, 7 Apr 2026 17:07:01 -0400 Subject: [PATCH 48/71] feat(substrate/test_helpers): missing test utility --- substrate/primitives/src/test_helpers.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 908c926f8..48d684c0f 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -28,6 +28,11 @@ pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { /// Generate a random `Vec` with a random length between 1 and 128. pub fn random_vec_u8(rng: &mut R) -> Vec { let len = (rng.next_u32() % 128) as usize + 1; + random_vec_of_len(rng, len) +} + +/// Generate a random byte vector of a specific length. +pub fn random_vec_of_len(rng: &mut R, len: usize) -> Vec { let mut bytes = vec![0u8; len]; rng.fill_bytes(&mut bytes); bytes From 5b04ae07f292a91868562d3ab11601078c0da964 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Thu, 9 Apr 2026 10:53:00 -0400 Subject: [PATCH 49/71] chore(coordinator/tributary): initial self-review by comparing changes --- coordinator/tributary/src/db.rs | 10 +- coordinator/tributary/src/tests/db.rs | 58 +- coordinator/tributary/src/tests/mod.rs | 6 +- coordinator/tributary/src/tests/scan_block.rs | 591 +++++++----------- .../tributary/src/tests/transaction.rs | 1 - coordinator/tributary/src/transaction.rs | 63 +- substrate/primitives/Cargo.toml | 1 - substrate/primitives/src/test_helpers.rs | 51 +- 8 files changed, 349 insertions(+), 432 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index cfbdfcc8b..92b50a556 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -52,6 +52,7 @@ pub(crate) enum Participating { } pub(crate) fn required_participation(n: u16) -> u16 { + // All of our topics require 2/3rds participation n.checked_mul(2).expect(&format!("required_participation overflowed: {n} * 2")) / 3 + 1 } @@ -110,8 +111,8 @@ impl Topic { pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }), Topic::RemoveParticipant { .. } | Topic::DkgConfirmation { .. } | Topic::SlashReport => None, + Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }), } } @@ -337,7 +338,8 @@ impl TributaryDb { ); } pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) { - ActivelyCosigning::take(txn, set).expect("finished cosigning but wasn't cosigning"); + ActivelyCosigning::take(txn, set) + .expect("tried to finish cosigning but wasn't actively cosigning"); } pub(crate) fn mark_cosigned( txn: &mut impl DbTxn, @@ -401,9 +403,9 @@ impl TributaryDb { txn: &mut impl DbTxn, set: ExternalValidatorSet, validator: SeraiAddress, - #[cfg_attr(coverage, allow(unused_variables))] reason: &str, + _reason: &str, ) { - serai_env::warn!("{validator} fatally slashed: {reason}"); + serai_env::warn!("{validator} fatally slashed: {_reason}"); SlashPoints::set(txn, set, validator, &u32::MAX); } diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 3bb1d94fd..1bb02db71 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -147,10 +147,16 @@ mod required_participation_tests { } #[test] - #[should_panic = "overflowed"] fn panics_on_overflow() { - // u16::MAX * 2 overflows u16 - required_participation(u16::MAX); + let res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + required_participation(u16::MAX); + })); + assert!(res.is_err()); + + let res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + required_participation((u16::MAX / 2) + 1); + })); + assert!(res.is_err()); } } @@ -910,7 +916,7 @@ mod tributary_db { topic, validator, validator_weight, - &vec![1, 2, 3], + &random_vec_u8(&mut OsRng), ); // Second call with same (validator, topic) should panic @@ -923,7 +929,7 @@ mod tributary_db { topic, validator, validator_weight, - &vec![4, 5, 6], + &random_vec_u8(&mut OsRng), ); } @@ -934,7 +940,7 @@ mod tributary_db { fn double_call_after_threshold_with_reattempt_panics() { // DkgConfirmation Preprocess has a reattempt topic, so entries survive post-threshold let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; - let (set, _validator, validators, total_weight, _validator_weight) = + let (set, validator, validators, total_weight, validator_weight) = default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); @@ -961,9 +967,9 @@ mod tributary_db { total_weight, block_number, topic, - validators[0], - 1, - &vec![99], + validator, + validator_weight, + &random_vec_u8(&mut OsRng), ); } @@ -974,7 +980,7 @@ mod tributary_db { fn double_call_after_threshold_without_reattempt_is_nop() { // RemoveParticipant has no reattempt, so entries are cleaned up post-threshold let topic = Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }; - let (set, _validator, validators, total_weight, _validator_weight) = + let (set, validator, validators, total_weight, validator_weight) = default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); @@ -1002,9 +1008,9 @@ mod tributary_db { total_weight, block_number, topic, - validators[0], - 1, - &vec![99], + validator, + validator_weight, + &random_vec_u8(&mut OsRng), ); assert!(matches!(result, DataSet::None), "should be NOP after threshold"); @@ -1044,7 +1050,7 @@ mod tributary_db { let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); let post_weight = AccumulatedWeight::get(db, set, topic); - // Branch 1: Slash for participating in unrecognized topic requiring recognition. + // Slash for participating in unrecognized topic requiring recognition. if topic.requires_recognition() && pre_weight.is_none() { assert!(post_slashed, "should be fatally slashed for unrecognized topic"); assert!(matches!(result, DataSet::None)); @@ -1058,7 +1064,7 @@ mod tributary_db { let weight_before = pre_weight.unwrap_or(0); - // Branch 2: Slash for participating without completing the preceding topic. + // Slash for participating without completing the preceding topic. if topic.preceding_topic().is_some() && !has_preceding_accumulated { assert!(post_slashed, "should be fatally slashed for missing preceding participation"); assert!(matches!(result, DataSet::None)); @@ -1066,7 +1072,7 @@ mod tributary_db { return; } - // Branch 3: Already accumulated past the threshold - NOP. + // Already accumulated past the threshold - NOP. if weight_before >= required { assert!(matches!(result, DataSet::None)); assert_eq!(post_weight, pre_weight, "weight unchanged when past threshold"); @@ -1076,7 +1082,7 @@ mod tributary_db { return; } - // Branch 5: Old attempt - the next attempt's topic already has weight. + // Old attempt, the next attempt's topic already has weight. // Note: pre_weight may be None (topic not yet recognized) which is preserved. let next_attempt_superseded = has_next_topic_weight && topic.next_attempt_topic().is_some(); if next_attempt_superseded { @@ -1088,7 +1094,7 @@ mod tributary_db { return; } - // Accumulation happened (Branches 6 & 7) + // Accumulation happened let new_weight = weight_before + validator_weight; assert_eq!(post_weight, Some(new_weight), "weight should reflect accumulation"); @@ -1097,9 +1103,9 @@ mod tributary_db { } if new_weight >= required { - // Branch 7: Threshold crossed. + // Threshold crossed. - // 7a: Reattempt should be queued if topic is reattemptable. + // Reattempt should be queued if topic is reattemptable. if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { let blocks_till = u64::from(reattempt_attempt) .checked_mul(u64::from(BASE_REATTEMPT_DELAY)) @@ -1115,7 +1121,7 @@ mod tributary_db { ); } - // 7b: Succeeding topic should be recognized (weight set to 0). + // Succeeding topic should be recognized (weight set to 0). if let Some(succeeding) = topic.succeeding_topic() { assert_eq!( AccumulatedWeight::get(db, set, succeeding), @@ -1124,7 +1130,7 @@ mod tributary_db { ); } - // 7c: Accumulated data cleanup depends on whether a reattempt exists. + // Accumulated data cleanup depends on whether a reattempt exists. // The cleanup loop only iterates the `validators` slice, so data for a validator // not in the list is never deleted regardless of reattempt status. let has_reattempt = topic.reattempt_topic().is_some(); @@ -1141,10 +1147,7 @@ mod tributary_db { ); } - // 7d: Result depends on whether the validator was in the collection list. - // The collection loop only gathers data from the `validators` slice. - // `participated` = data_set.contains_key(&validator), which is false when - // the validator is not in the slice. + // Result depends on whether the validator was in the collection list. if validator_in_list { match result { DataSet::Participating(data_set) => { @@ -1186,7 +1189,8 @@ mod tributary_db { } } } else { - // Branch 6: Below threshold - data stored, result is None. + // Below threshold + // data stored, result is None. assert!(matches!(result, DataSet::None), "result should be None when below threshold"); assert_eq!( Accumulated::>::get(db, set, topic, validator), diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 6ad3cfd55..5c94bf85a 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -231,7 +231,7 @@ pub(crate) fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { /// Drain expected messages produced by the given transactions, then assert both queues are empty. /// -/// Some transactions produce messages on first submission (e.g. DkgParticipation, Cosign). +/// Some transactions produce messages on first submission (DkgParticipation, Cosign, SlashReport). /// This function drains those expected messages before calling `assert_no_pending_messages`. pub(crate) fn assert_block_side_effects( txn: &mut impl serai_db::DbTxn, @@ -296,7 +296,7 @@ pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInf /// Generate `n` random validators (weight 1 each) with keys, returning all derived collections. pub(crate) fn setup_n_validators_with_keys( - n: usize, + n: u16, ) -> ( Vec<(RistrettoPoint, SeraiAddress)>, Vec<(SeraiAddress, u16)>, @@ -310,7 +310,7 @@ pub(crate) fn setup_n_validators_with_keys( keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); let validators: Vec = validator_data.iter().map(|(a, _)| *a).collect(); let weights: HashMap = validator_data.iter().copied().collect(); - let total_weight = n as u16; + let total_weight = n; (keys_addrs, validator_data, validators, weights, total_weight) } diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index cc399285d..b01cbe8fc 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -9,11 +9,7 @@ use tributary_sdk::{ Block, BlockHeader, Transaction as TributaryTransaction, Evidence, tendermint::tx::TendermintTx, }; -use crate::{ - CosignIntents, DkgConfirmationMessages, ProcessorMessages, RecognizedTopics, ScanBlock, - SubstrateBlockPlans, db::CosignIntents as DbCosignIntents, -}; - +use crate::{*, db::CosignIntents as DbCosignIntents}; use super::*; fn new_scan_block<'a, TDT: DbTxn>( @@ -75,7 +71,7 @@ fn potentially_start_cosign() { assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(initial_block_hash)); } - // No TributaryDb::latest_substrate_block_to_cosign block: no-op + // No TributaryDb::latest_substrate_block_to_cosign block: nop { let mut db = MemDb::new(); let mut txn = db.txn(); @@ -86,7 +82,7 @@ fn potentially_start_cosign() { assert!(TributaryDb::actively_cosigning(&mut txn, set).is_none()); } - // Already cosigned: no-op + // Already cosigned: nop { let mut db = MemDb::new(); let initial_block_hash = random_block_hash(&mut OsRng); @@ -237,10 +233,9 @@ fn accumulate_dkg_confirmation() { assert_eq!(data_set[&Participant::new(3).unwrap()], data3); } - // Past threshold: further accumulations from a new validator are no-ops + // Past threshold: further accumulations from a new validator are nops { // Add a 4th validator so we have a fresh signer after threshold is crossed. - // total_weight=4, required_participation = 3, so v0+v1+v2 cross threshold. let v4 = random_serai_address(&mut OsRng); let mut validator_data_4 = validator_data.clone(); validator_data_4.push((v4, 1)); @@ -735,115 +730,66 @@ mod handle_application_tx { use super::*; #[test] - fn odd_slash_points() { - let set = default_test_validator_set(); - let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_test_validators_and_weights_with_keys(); - let set_info = new_test_set_info(&validator_data); - let (key0, addr0) = keys_addrs[0]; - - // Wrong length: 3 validators but mismatched slash points -> fatal slash - for wrong_points in [ - vec![], - // 1 or 2 - vec![0; 1 + (OsRng.next_u32() as usize % 2)], - // 4..100 - vec![0; 4 + (OsRng.next_u32() as usize % 97)], - ] { - let mut db = MemDb::new(); - let mut txn = db.txn(); - - { - let mut scan_block = - new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - random_block_number(&mut OsRng), - Transaction::SlashReport { - slash_points: wrong_points.clone(), - signed: new_signed(key0), - }, - ); - } - assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, addr0), - "expected fatal slash for slash_points length {}, but wasn't slashed", - wrong_points.len() - ); + fn wrong_length() { + let num_validators = OsRng.gen_range(4u16 .. 10); + let mut wrong_len = OsRng.gen_range(1u16 .. 20); + if wrong_len == num_validators { + wrong_len = if wrong_len == 1 { 2 } else { wrong_len - 1 }; } + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_n_validators_with_keys(4); + setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); let mut txn = db.txn(); - // Valid length: accumulates weight + let (signer_key, signer_addr) = keys_addrs[0]; + { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( random_block_number(&mut OsRng), Transaction::SlashReport { - slash_points: vec![0, 0, 0, 100], - signed: new_signed(keys_addrs[0].0), + slash_points: vec![0; wrong_len as usize], + signed: new_signed(signer_key), }, ); } - assert!(RecognizedTopics::recognized(&mut txn, set, Topic::SlashReport)); - assert!(ProcessorMessages::try_recv(&mut txn, set).is_none()); - // Threshold crossed: computes median slash report and sends SignSlashReport message - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 4, &weights); - for key in [keys_addrs[1].0, keys_addrs[2].0] { - scan_block.handle_application_tx( - random_block_number(&mut OsRng), - // Each reporter says: first 3 validators have 0 points, 4th has 100 - Transaction::SlashReport { slash_points: vec![0, 0, 0, 100], signed: new_signed(key) }, - ); - } - } - let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); - assert!(RecognizedTopics::recognized(&mut txn, set, sign_topic,)); - assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); + assert!( + TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), + "signer should be fatally slashed for wrong-length slash report", + ); + assert!( + ProcessorMessages::try_recv(&mut txn, set).is_none(), + "no message should be sent for wrong-length slash report", + ); } - /// Exercises the even-length median branch (`(this_validator.len() / 2) - 1`) in - /// the SlashReport handler by using 5 validators where `required_participation = 4` (even). #[test] - fn even_slash_points() { - let set = default_test_validator_set(); + fn fatal_slash_as_reported_median() { + let num_validators = OsRng.gen_range(4u16 .. 10); + let num_reports = required_participation(num_validators) as usize; - // 5 validators of weight 1 -> required_participation = 5*2/3+1 = 4 - let (keys_addrs, validator_data, validators, weights, _) = setup_n_validators_with_keys(5); + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); + let report = vec![u32::MAX, 0, 0, 0]; + let reports: Vec> = vec![report; num_reports]; + let mut db = MemDb::new(); let mut txn = db.txn(); - // 4 reporters submit different opinions about validator 4 (index 4). - // Reports (for all 5 validator positions): - // reporter 0: [0, 0, 0, 0, 10] - // reporter 1: [0, 0, 0, 0, 20] - // reporter 2: [0, 0, 0, 0, 30] - // reporter 3: [0, 0, 0, 0, 40] - // - // Sorted values for validator 4: [10, 20, 30, 40] (len=4, even) - // Even median index: (4 / 2) - 1 = 1 -> median = 20 - // - // f = (5-1)/3 = 1, amortization baseline = sorted_medians[5-1-1] = sorted_medians[3] = 0 - // amortized: [0, 0, 0, 0, 20]. Non-zero entries: [20] for validator 4. - let slash_reports = vec![ - vec![0u32, 0, 0, 0, 10], - vec![0, 0, 0, 0, 20], - vec![0, 0, 0, 0, 30], - vec![0, 0, 0, 0, 40], - ]; - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 5, &weights); - for (i, report) in slash_reports.iter().enumerate() { + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( random_block_number(&mut OsRng), @@ -852,16 +798,9 @@ mod handle_application_tx { } } - // Threshold was crossed with 4 reporters (even) -> even median branch exercised. - // Verify the signing topic was recognized and a message was sent. - let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); - assert!( - RecognizedTopics::recognized(&mut txn, set, sign_topic), - "SlashReport sign topic should be recognized" - ); - + // A ProcessorMessage should be produced containing a Fatal slash let msg = ProcessorMessages::try_recv(&mut txn, set); - assert!(msg.is_some(), "expected SignSlashReport processor message"); + assert!(msg.is_some(), "expected ProcessorMessage for fatal slash report"); } mod fuzz_slash_report { @@ -871,15 +810,15 @@ mod handle_application_tx { /// produce when `DataSet::Participating` is reached, mirroring the production logic. /// /// Returns `None` if `f == 0` (the slash report would be empty and nothing is sent). - fn expected_slash_report(num_validators: usize, reports: &[Vec]) -> Option> { + fn expected_slash_report(num_validators: u16, reports: &[Vec]) -> Option> { let f = (num_validators - 1) / 3; if f == 0 { return None; } // Compute the median for each validator position across all reporters - let mut medians = Vec::with_capacity(num_validators); - for i in 0 .. num_validators { + let mut medians = Vec::with_capacity(num_validators as usize); + for i in 0 .. usize::from(num_validators) { let mut values: Vec = reports.iter().map(|r| r[i]).collect(); values.sort_unstable(); let median_index = @@ -890,7 +829,7 @@ mod handle_application_tx { // Find worst validator in the supermajority and amortize let mut sorted = medians.clone(); sorted.sort_unstable(); - let amortization = sorted[num_validators - f - 1]; + let amortization = sorted[usize::from(num_validators - f - 1)]; let amortized: Vec = medians.iter().map(|p| p.saturating_sub(amortization)).collect(); @@ -899,64 +838,42 @@ mod handle_application_tx { Some(result) } - /// Generate a random slash point value drawn from a weighted distribution: - /// ~30% zero, ~20% small (1..100), ~20% medium (100..10_000), ~30% fatal (u32::MAX). - /// The high fatal weight ensures the median across reporters frequently lands on u32::MAX, - /// exercising the Slash::Fatal branch after amortization. - fn random_slash_point(rng: &mut impl Rng) -> u32 { - match rng.gen_range(0u8 .. 10) { - 0 ..= 2 => 0, - 3 ..= 4 => rng.gen_range(1 .. 100), - 5 ..= 6 => rng.gen_range(100 .. 10_000), - _ => u32::MAX, - } - } - /// Generate `count` slash report vectors, each of length `num_validators`. fn random_slash_reports( rng: &mut impl Rng, - num_validators: usize, - count: usize, + num_validators: u16, + count: u16, ) -> Vec> { - (0 .. count) - .map(|_| (0 .. num_validators).map(|_| random_slash_point(rng)).collect()) - .collect() + (0 .. count).map(|_| (0 .. num_validators).map(|_| rng.next_u32()).collect()).collect() } - /// Fuzz the SlashReport -> Participating path with randomized slash point vectors. - /// - /// Uses 4 validators (f=1) so the threshold-crossing path is reachable. - /// All 4 submit identical reports so the median equals the input. #[test] - fn fuzz_slash_report_participating_4_validators() { + fn fuzz_slash_report_even_validators() { for _ in 0 .. 200 { + // random even: 4, 6, 8, or 10 + let n = OsRng.gen_range(2u16 ..= 5) * 2; + let num_reports = required_participation(n as u16); + let set = default_test_validator_set(); let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_n_validators_with_keys(4); + setup_n_validators_with_keys(n); let set_info = new_test_set_info(&validator_data); - let slash_points: Vec = (0 .. 4).map(|_| random_slash_point(&mut OsRng)).collect(); + let reports = random_slash_reports(&mut OsRng, n, num_reports); + let expected = expected_slash_report(n, &reports); let mut db = MemDb::new(); let mut txn = db.txn(); - // All 4 validators submit the same slash_points - // required_participation = 4*2/3+1 = 3, so 3 cross the threshold. - // The 4th submission is a NOP (past threshold). - let reports: Vec> = vec![slash_points.clone(); 3]; - let expected = expected_slash_report(4, &reports); - { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for (key, _) in &keys_addrs { + for (i, report) in reports.iter().enumerate() { + let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( - 1, - Transaction::SlashReport { - slash_points: slash_points.clone(), - signed: new_signed(*key), - }, + random_block_number(&mut OsRng), + Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, ); } } @@ -969,9 +886,6 @@ mod handle_application_tx { ); } _ => { - // Empty or f==0 -> slash report is empty, nothing to sign. - // The handler always sends a message when Participating is reached - // (assert passes with len=0 <= f=1). assert!( ProcessorMessages::try_recv(&mut txn, set).is_some(), "expected ProcessorMessage even for empty slash report", @@ -987,32 +901,33 @@ mod handle_application_tx { } } - /// Fuzz with varying reporter opinions (not all identical). - /// Uses 7 validators (f=2) for a richer median calculation. #[test] - fn fuzz_slash_report_diverse_opinions_7_validators() { + fn fuzz_slash_report_odd_validators() { for _ in 0 .. 200 { + // random odd: 5, 7, 9, or 11 + let n = OsRng.gen_range(2u16 ..= 5) * 2 + 1; + let f = usize::from((n - 1) / 3); + let num_reports = required_participation(n); + let set = default_test_validator_set(); - // 7 validators, f = (7-1)/3 = 2 - let (keys_addrs, validator_data, validators, weights, _) = - setup_n_validators_with_keys(7); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(n); let set_info = new_test_set_info(&validator_data); - // required_participation = 7*2/3+1 = 5 - // We have 5 reports from 5 different validators to cross the threshold - let reports = random_slash_reports(&mut OsRng, 7, 5); - let expected = expected_slash_report(7, &reports[.. 5]); + let reports = random_slash_reports(&mut OsRng, n, num_reports); + let expected = expected_slash_report(n, &reports); let mut db = MemDb::new(); let mut txn = db.txn(); { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 7, &weights); + let mut scan_block = + new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( - 1, + random_block_number(&mut OsRng), Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, ); } @@ -1020,7 +935,7 @@ mod handle_application_tx { match expected { Some(result) => { - assert!(result.len() <= 2, "slash report len {} should be <= f=2", result.len()); + assert!(result.len() <= f, "slash report len {} should be <= f={f}", result.len()); } None => { unreachable!(); @@ -1032,51 +947,6 @@ mod handle_application_tx { assert!(RecognizedTopics::recognized(&mut txn, set, sign_topic)); } } - - /// Fuzz the wrong-length path: slash_points.len() != validators.len() -> fatal slash - #[test] - fn fuzz_slash_report_wrong_length() { - for _ in 0 .. 200 { - let num_validators = OsRng.gen_range(4usize .. 10); - let mut wrong_len = OsRng.gen_range(1usize .. 20); - if wrong_len == num_validators { - // Shift to guarantee mismatch - wrong_len = if wrong_len == 1 { 2 } else { wrong_len - 1 }; - } - - let set = default_test_validator_set(); - - let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_n_validators_with_keys(num_validators); - let set_info = new_test_set_info(&validator_data); - - let mut db = MemDb::new(); - let mut txn = db.txn(); - - let (signer_key, signer_addr) = keys_addrs[0]; - - { - let mut scan_block = - new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - 1, - Transaction::SlashReport { - slash_points: vec![0; wrong_len], - signed: new_signed(signer_key), - }, - ); - } - - assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), - "signer should be fatally slashed for wrong-length slash report", - ); - assert!( - ProcessorMessages::try_recv(&mut txn, set).is_none(), - "no message should be sent for wrong-length slash report", - ); - } - } } } @@ -1140,7 +1010,7 @@ mod handle_application_tx { } } - /// Exercises the Sign Share -> Participating path (line 559: `Shares { id, shares: data_set }`). + /// Exercises the Sign Share -> Participating path. /// Requires first accumulating preprocesses to threshold (which recognizes the Share topic /// and stores preceding data), then accumulating shares to threshold. #[test] @@ -1215,200 +1085,193 @@ mod handle_application_tx { } } -mod handle_block { - use super::*; +#[test] +fn handle_block() { + let set = default_test_validator_set(); + let (keys_addrs, validator_data, validators, weights, total_weight) = + setup_n_validators_with_keys(3); + let set_info = new_test_set_info(&validator_data); + let addr0 = validator_data[0].0; + let signed = new_signed(keys_addrs[0].0); - #[test] - fn handles_all_transaction_types() { - let set = default_test_validator_set(); - let (keys_addrs, validator_data, validators, weights, total_weight) = - setup_n_validators_with_keys(3); - let set_info = new_test_set_info(&validator_data); - let addr0 = validator_data[0].0; - let signed = new_signed(keys_addrs[0].0); + // Empty block only calls start of block + { + let mut db = MemDb::new(); + let mut txn = db.txn(); + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: vec![], + }; - // Empty block only calls start of block { - let mut db = MemDb::new(); - let mut txn = db.txn(); - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![], - }; - - { - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); - } - assert_no_pending_messages(&mut txn, set); + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); } + assert_no_pending_messages(&mut txn, set); + } - // Each application transaction type passes through handle_block. - // Signed transactions use a real validator key so participant_indexes lookups succeed. - // Cosign and SubstrateBlock need external state populated before they can run. - for tx in all_signed_transactions_and_attempts(signed) { - let mut db = MemDb::new(); - let mut txn = db.txn(); + // Each application transaction type passes through handle_block. + // Signed transactions use a real validator key so participant_indexes lookups succeed. + // Cosign and SubstrateBlock need external state populated before they can run. + for tx in all_signed_transactions_and_attempts(signed) { + let mut db = MemDb::new(); + let mut txn = db.txn(); - let block_txs = vec![TributaryTransaction::Application(tx)]; - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: block_txs.clone(), - }; + let block_txs = vec![TributaryTransaction::Application(tx)]; + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: block_txs.clone(), + }; - { - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); - } - assert_block_side_effects(&mut txn, set, &block_txs); + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); } + assert_block_side_effects(&mut txn, set, &block_txs); + } - // Provided transactions that need preconditions - for tx in all_provided_transactions() { - let mut db = MemDb::new(); - let mut txn = db.txn(); + // Provided transactions that need preconditions + for tx in all_provided_transactions() { + let mut db = MemDb::new(); + let mut txn = db.txn(); - // Set up required external state - match &tx { - Transaction::Cosign { substrate_block_hash } => { - CosignIntents::provide( - &mut txn, - set, - &CosignIntent { - global_session: random_bytes_32(&mut OsRng), - block_number: random_block_number(&mut OsRng), - block_hash: *substrate_block_hash, - notable: false, - }, - ); - } - Transaction::SubstrateBlock { hash } => { - let plans = vec![random_bytes_32(&mut OsRng)]; - SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); - } - _ => {} + // Set up required external state + match &tx { + Transaction::Cosign { substrate_block_hash } => { + CosignIntents::provide( + &mut txn, + set, + &CosignIntent { + global_session: random_bytes_32(&mut OsRng), + block_number: random_block_number(&mut OsRng), + block_hash: *substrate_block_hash, + notable: false, + }, + ); } - - let block_txs = vec![TributaryTransaction::Application(tx)]; - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: block_txs.clone(), - }; - - { - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + Transaction::SubstrateBlock { hash } => { + let plans = vec![random_bytes_32(&mut OsRng)]; + SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); } - assert_block_side_effects(&mut txn, set, &block_txs); + _ => {} } - // Each Tendermint SlashEvidence type fatally slashes the sender - { - let all_evidence = [ - Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), - Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), - Evidence::ConflictingMessages( - make_signed_message_bytes(addr0.0), - make_signed_message_bytes(addr0.0), - ), - ]; - - for evidence in all_evidence { - let mut db = MemDb::new(); - let mut txn = db.txn(); - - let block = Block { - header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), - }, - transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( - evidence, - ))], - }; - - { - let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(1, block); - } - assert!( - TributaryDb::is_fatally_slashed(&txn, set, addr0), - "SlashEvidence should fatally slash the sender", - ); + let block_txs = vec![TributaryTransaction::Application(tx)]; + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: block_txs.clone(), + }; - assert_no_pending_messages(&mut txn, set); - txn.commit(); - } + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); } + assert_block_side_effects(&mut txn, set, &block_txs); + } - // Fuzz mixed blocks with random quantities, types, and ordering - for _ in 0 .. 100 { + // Each Tendermint SlashEvidence type fatally slashes the sender + { + let all_evidence = [ + Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), + Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), + Evidence::ConflictingMessages( + make_signed_message_bytes(addr0.0), + make_signed_message_bytes(addr0.0), + ), + ]; + + for evidence in all_evidence { let mut db = MemDb::new(); let mut txn = db.txn(); - let num_txs = OsRng.gen_range(1usize ..= 8); - let mut transactions = Vec::with_capacity(num_txs); - let mut has_evidence = false; - let mut batch_hashes = vec![]; - - for _ in 0 .. num_txs { - if OsRng.gen_bool(0.5) { - // Random Tendermint evidence type - let evidence = match OsRng.gen_range(0u8 .. 3) { - 0 => Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), - 1 => Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), - _ => Evidence::ConflictingMessages( - make_signed_message_bytes(addr0.0), - make_signed_message_bytes(addr0.0), - ), - }; - transactions - .push(TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(evidence))); - has_evidence = true; - } else { - // Random application transaction, use Batch so we can assert recognition - let hash = random_bytes_32(&mut OsRng); - batch_hashes.push(hash); - transactions.push(TributaryTransaction::Application(Transaction::Batch { hash })); - } - } - let block = Block { header: BlockHeader { parent: random_bytes_32(&mut OsRng), transactions: random_bytes_32(&mut OsRng), }, - transactions: transactions.clone(), + transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(evidence))], }; { let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + scan_block.handle_block(1, block); } + assert!( + TributaryDb::is_fatally_slashed(&txn, set, addr0), + "SlashEvidence should fatally slash the sender", + ); - if has_evidence { - assert!( - TributaryDb::is_fatally_slashed(&txn, set, addr0), - "SlashEvidence should fatally slash the sender in mixed blocks", - ); - } - for hash in &batch_hashes { - let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(*hash)); - assert!( - RecognizedTopics::recognized(&txn, set, topic), - "Batch should be recognized regardless of other txs in the block", - ); + assert_no_pending_messages(&mut txn, set); + txn.commit(); + } + } + + // Fuzz mixed blocks with random quantities, types, and ordering + for _ in 0 .. 100 { + let mut db = MemDb::new(); + let mut txn = db.txn(); + + let num_txs = OsRng.gen_range(1usize ..= 8); + let mut transactions = Vec::with_capacity(num_txs); + let mut has_evidence = false; + let mut batch_hashes = vec![]; + + for _ in 0 .. num_txs { + if OsRng.gen_bool(0.5) { + // Random Tendermint evidence type + let evidence = match OsRng.gen_range(0u8 .. 3) { + 0 => Evidence::InvalidPrecommit(make_signed_message_bytes(addr0.0)), + 1 => Evidence::InvalidValidRound(make_signed_message_bytes(addr0.0)), + _ => Evidence::ConflictingMessages( + make_signed_message_bytes(addr0.0), + make_signed_message_bytes(addr0.0), + ), + }; + transactions.push(TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(evidence))); + has_evidence = true; + } else { + // Random application transaction, use Batch so we can assert recognition + let hash = random_bytes_32(&mut OsRng); + batch_hashes.push(hash); + transactions.push(TributaryTransaction::Application(Transaction::Batch { hash })); } - assert_block_side_effects(&mut txn, set, &transactions); } + + let block = Block { + header: BlockHeader { + parent: random_bytes_32(&mut OsRng), + transactions: random_bytes_32(&mut OsRng), + }, + transactions: transactions.clone(), + }; + + { + let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); + scan_block.handle_block(random_block_number(&mut OsRng), block); + } + + if has_evidence { + assert!( + TributaryDb::is_fatally_slashed(&txn, set, addr0), + "SlashEvidence should fatally slash the sender in mixed blocks", + ); + } + for hash in &batch_hashes { + let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(*hash)); + assert!( + RecognizedTopics::recognized(&txn, set, topic), + "Batch should be recognized regardless of other txs in the block", + ); + } + assert_block_side_effects(&mut txn, set, &transactions); } } diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 73d570d89..6e1cea134 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -242,7 +242,6 @@ mod transaction { SigningProtocolRound::Preprocess => expected.push(0u8), SigningProtocolRound::Share => expected.push(1u8), } - // Use the RoundPayloads type of Vec to fit for both rounds expected.extend(&(data.len() as u32).to_le_bytes()); for d in data { expected.extend(&(d.len() as u32).to_le_bytes()); diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 5065f235c..03d22609f 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -152,38 +152,39 @@ pub enum Transaction { substrate_block_hash: BlockHash, }, - // After producing this cosign, we need to start work on the latest intended-to-be cosigned - // block. That requires agreement on when this cosign was produced, which we solve by noting - // this cosign on-chain. - // - // We ideally don't have this transaction at all. The coordinator, without access to any of the - // key shares, could observe the FROST signing session and determine a successful completion. - // Unfortunately, that functionality is not present in modular-frost, so we do need to support - // *some* asynchronous flow (where the processor or P2P network informs us of the successful - // completion). - // - // If we use a `Provided` transaction, that requires everyone observe this cosign. - // - // If we use an `Unsigned` transaction, we can't verify the cosign signature inside - // `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since - // a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`, - // we can't verify the signature against the group's public key unless we also include that (but - // then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary - // blobs on chain). - // - // If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally - // slash. We have horrible performance though as for 100 validators, all 100 will publish this - // transaction. - // - // We could use a signed `Unsigned` transaction, where it includes a signer and signature but - // isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on - // its contents. - // - // The optimal choice is likely to use a `Provided` transaction. We don't actually need to - // observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in - // question no longer needs to produced, which would mean the cosigning protocol at-large - // cosigning the block in question, it'd be safe to provide this and move on to the next cosign. /// Note an intended-to-be-cosigned Substrate block as cosigned + /// + /// After producing this cosign, we need to start work on the latest intended-to-be cosigned + /// block. That requires agreement on when this cosign was produced, which we solve by noting + /// this cosign on-chain. + /// + /// We ideally don't have this transaction at all. The coordinator, without access to any of the + /// key shares, could observe the FROST signing session and determine a successful completion. + /// Unfortunately, that functionality is not present in modular-frost, so we do need to support + /// *some* asynchronous flow (where the processor or P2P network informs us of the successful + /// completion). + /// + /// If we use a `Provided` transaction, that requires everyone observe this cosign. + /// + /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside + /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since + /// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`, + /// we can't verify the signature against the group's public key unless we also include that (but + /// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary + /// blobs on chain). + /// + /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally + /// slash. We have horrible performance though as for 100 validators, all 100 will publish this + /// transaction. + /// + /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but + /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on + /// its contents. + /// + /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to + /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in + /// question no longer needs to produced, which would mean the cosigning protocol at-large + /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign. Cosigned { /// The hash of the Substrate block which was cosigned substrate_block_hash: BlockHash, diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index e9d53cc02..06cec625e 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -42,7 +42,6 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] } bech32 = { version = "0.11", default-features = false, features = ["alloc"] } [features] -test-helpers = ["std"] std = [ "rand_core/std", diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 9113bad50..047dee135 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -1,6 +1,6 @@ //! Test helpers for generating random instances of primitive types. -use alloc::vec; +use alloc::{vec, vec::Vec}; use rand_core::{RngCore, CryptoRng}; @@ -8,6 +8,8 @@ use crate::{ BlockHash, address::{SeraiAddress, ExternalAddress}, crypto::{Public, ExternalKey}, + network_id::ExternalNetworkId, + validator_sets::{ExternalValidatorSet, Session}, }; /// Generate a random 32-byte array. @@ -24,6 +26,19 @@ pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { bytes } +/// Generate a random `Vec` with a random length between 1 and 128. +pub fn random_vec_u8(rng: &mut R) -> Vec { + let len = (rng.next_u32() % 128) as usize + 1; + random_vec_of_len(rng, len) +} + +/// Generate a random byte vector of a specific length. +pub fn random_vec_of_len(rng: &mut R, len: usize) -> Vec { + let mut bytes = vec![0u8; len]; + rng.fill_bytes(&mut bytes); + bytes +} + /// Generate a random [`ExternalAddress`]. pub fn random_external_address(rng: &mut R) -> ExternalAddress { let len = usize::try_from(rng.next_u32() % ExternalAddress::MAX_SIZE).unwrap(); @@ -75,3 +90,37 @@ fn random_external_key_is_in_range() { pub fn random_block_hash(rng: &mut R) -> BlockHash { BlockHash(random_bytes_32(rng)) } + +/// Generate a random global session ID (`[u8; 32]`). +pub fn random_global_session(rng: &mut R) -> [u8; 32] { + random_bytes_32(rng) +} + +/// Generate a random genesis +pub fn random_genesis(rng: &mut R) -> [u8; 32] { + random_bytes_32(rng) +} + +/// Generate a random block number. +pub fn random_block_number(rng: &mut R) -> u64 { + rng.next_u64() +} + +/// Generate a random [`ExternalNetworkId`]. +pub fn random_external_network_id(rng: &mut R) -> ExternalNetworkId { + let all: Vec<_> = ExternalNetworkId::all().collect(); + all[(rng.next_u32() as usize) % all.len()] +} + +/// Generate a random [`ExternalValidatorSet`]. +pub fn random_validator_set(rng: &mut R) -> ExternalValidatorSet { + ExternalValidatorSet { + network: random_external_network_id(rng), + session: Session(rng.next_u32()), + } +} + +/// A default [`ExternalValidatorSet`] for tests where the set value doesn't matter. +pub fn default_test_validator_set() -> ExternalValidatorSet { + ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } +} From c40fa883adfce091db2b514712b3f4bb5439571d Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Thu, 9 Apr 2026 16:09:18 -0400 Subject: [PATCH 50/71] chore(coordinator/tributary): missing misc --- coordinator/tributary/src/tests/scan_block.rs | 3 ++- tests/substrate/Cargo.toml | 6 ------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index b01cbe8fc..9a81b474f 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -780,7 +780,8 @@ mod handle_application_tx { setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); - let report = vec![u32::MAX, 0, 0, 0]; + let mut report = vec![0u32; num_validators as usize]; + report[0] = u32::MAX; let reports: Vec> = vec![report; num_reports]; let mut db = MemDb::new(); diff --git a/tests/substrate/Cargo.toml b/tests/substrate/Cargo.toml index 325bc4b61..5cb20c00d 100644 --- a/tests/substrate/Cargo.toml +++ b/tests/substrate/Cargo.toml @@ -23,9 +23,3 @@ tokio = { version = "1", features = ["time"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } - -rand_core = { version = "0.6", default-features = false, features = ["std"] } - -[dev-dependencies] -rand = { version = "0.8", default-features = false, features = ["std"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } From b1b2ccf00b3b3a42f13b60ef1e4700de4caeaf9e Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Fri, 10 Apr 2026 14:17:09 -0400 Subject: [PATCH 51/71] chore: ci alerts --- Cargo.lock | 768 ++++++++++++------ coordinator/tributary/src/db.rs | 3 +- .../tributary/src/tests/transaction.rs | 2 +- substrate/primitives/src/test_helpers.rs | 4 +- 4 files changed, 540 insertions(+), 237 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc95fbe10..a2f55a05c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -110,7 +110,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.0.1", + "derive_more 2.1.1", "either", "k256", "once_cell", @@ -203,7 +203,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.0.1", + "derive_more 2.1.1", "either", "serde", "serde_with", @@ -282,7 +282,7 @@ dependencies = [ "alloy-sol-types", "async-trait", "auto_impl", - "derive_more 2.0.1", + "derive_more 2.1.1", "futures-utils-wasm", "serde", "serde_json", @@ -334,10 +334,10 @@ dependencies = [ "bytes", "cfg-if", "const-hex", - "derive_more 2.0.1", + "derive_more 2.1.1", "foldhash 0.2.0", "hashbrown 0.16.1", - "indexmap 2.13.1", + "indexmap 2.14.0", "itoa", "k256", "keccak-asm", @@ -449,7 +449,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2145138f3214928f08cd13da3cb51ef7482b5920d8ac5a02ecd4e38d1a8f6d1e" dependencies = [ "alloy-primitives", - "derive_more 2.0.1", + "derive_more 2.1.1", "serde", "serde_with", ] @@ -565,7 +565,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.13.1", + "indexmap 2.14.0", "proc-macro-error2", "proc-macro2", "quote", @@ -621,7 +621,7 @@ dependencies = [ "alloy-json-rpc", "auto_impl", "base64", - "derive_more 2.0.1", + "derive_more 2.1.1", "futures", "futures-utils-wasm", "parking_lot", @@ -643,7 +643,7 @@ checksum = "3f14b5d9b2c2173980202c6ff470d96e7c5e202c65a9f67884ad565226df7fbb" dependencies = [ "alloy-primitives", "alloy-rlp", - "derive_more 2.0.1", + "derive_more 2.1.1", "nybbles", "serde", "smallvec", @@ -1027,6 +1027,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd307490d624467aa6f74b0eabb77633d1f758a7b25f12bceb0b22e08d9726f6" +[[package]] +name = "base256emoji" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" +dependencies = [ + "const-str", + "match-lookup", +] + [[package]] name = "base58ck" version = "0.1.0" @@ -1368,9 +1378,9 @@ version = "2.99.99" [[package]] name = "cc" -version = "1.2.59" +version = "1.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283" +checksum = "43c5703da9466b66a946814e1adf53ea2c90f10063b86290cc9eb67ce3478a20" dependencies = [ "find-msvc-tools", "jobserver", @@ -1615,6 +1625,12 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + [[package]] name = "const_format" version = "0.2.35" @@ -1635,6 +1651,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1713,27 +1738,27 @@ dependencies = [ [[package]] name = "cranelift-assembler-x64" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f248321c6a7d4de5dcf2939368e96a397ad3f53b6a076e38d0104d1da326d37" +checksum = "046d4b584c3bb9b5eb500c8f29549bec36be11000f1ba2a927cef3d1a9875691" dependencies = [ "cranelift-assembler-x64-meta", ] [[package]] name = "cranelift-assembler-x64-meta" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab6d78ff1f7d9bf8b7e1afbedbf78ba49e38e9da479d4c8a2db094e22f64e2bc" +checksum = "b9b194a7870becb1490366fc0ae392ccd188065ff35f8391e77ac659db6fb977" dependencies = [ "cranelift-srcgen", ] [[package]] name = "cranelift-bforest" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b6005ba640213a5b95382aeaf6b82bf028309581c8d7349778d66f27dc1180b" +checksum = "bb6a4ab44c6b371e661846b97dab687387a60ac4e2f864e2d4257284aad9e889" dependencies = [ "cranelift-entity", "wasmtime-internal-core", @@ -1741,9 +1766,9 @@ dependencies = [ [[package]] name = "cranelift-bitset" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fb5b134a12b559ff0c0f5af0fcd755ad380723b5016c4e0d36f74d39485340" +checksum = "b8b7a44150c2f471a94023482bda1902710746e4bed9f9973d60c5a94319b06d" dependencies = [ "serde", "serde_derive", @@ -1752,9 +1777,9 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85837de8be7f17a4034a6b08816f05a3144345d2091937b39d415990daca28f4" +checksum = "01b06598133b1dd76758b8b95f8d6747c124124aade50cea96a3d88b962da9fa" dependencies = [ "bumpalo", "cranelift-assembler-x64", @@ -1780,9 +1805,9 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e433faa87d38e5b8ff469e44a26fea4f93e58abd7a7c10bad9810056139700c9" +checksum = "6190e2e7bcf0a678da2f715363d34ed530fedf7a2f0ab75edaefef72a70465ff" dependencies = [ "cranelift-assembler-x64-meta", "cranelift-codegen-shared", @@ -1793,24 +1818,24 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5397ba61976e13944ca71230775db13ee1cb62849701ed35b753f4761ed0a9b7" +checksum = "f583cf203d1aa8b79560e3b01f929bdacf9070b015eec4ea9c46e22a3f83e4a0" [[package]] name = "cranelift-control" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc81c88765580720eb30f4fc2c1bfdb75fcbf3094f87b3cd69cecca79d77a245" +checksum = "803159df35cc398ae54473c150b16d6c77e92ab2948be638488de126a3328fbc" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "463feed5d46cf8763f3ba3045284cf706dd161496e20ec9c14afbb4ba09b9e66" +checksum = "3109e417257082d88087f5bcce677525bdaa8322b88dd7f175ed1a1fd41d546c" dependencies = [ "cranelift-bitset", "serde", @@ -1820,9 +1845,9 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c5eca7696c1c04ab4c7ed8d18eadbb47d6cc9f14ec86fe0881bf1d7e97e261" +checksum = "14db6b0e0e4994c581092df78d837be2072578f7cb2528f96a6cf895e56dee63" dependencies = [ "cranelift-codegen", "log", @@ -1832,15 +1857,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1153844610cc9c6da8cf10ce205e45da1a585b7688ed558aa808bbe2e4e6d77" +checksum = "ec66ea5025c7317383699778282ac98741d68444f956e3b1d7b62f12b7216e67" [[package]] name = "cranelift-native" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97b583fe9a60f06b0464cee6be5a17f623fd91b217aaac99b51b339d19911af" +checksum = "373ade56438e6232619d85678477d0a88a31b3581936e0503e61e96b546b0800" dependencies = [ "cranelift-codegen", "libc", @@ -1849,9 +1874,9 @@ dependencies = [ [[package]] name = "cranelift-srcgen" -version = "0.130.0" +version = "0.130.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8594dc6bb4860fa8292f1814c76459dbfb933e1978d8222de6380efce45c7cee" +checksum = "ef53619d3cd5c78fd998c6d9420547af26b72e6456f94c2a8a2334cb76b42baa" [[package]] name = "crc32fast" @@ -2150,11 +2175,11 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ - "derive_more-impl 2.0.1", + "derive_more-impl 2.1.1", ] [[package]] @@ -2170,12 +2195,14 @@ dependencies = [ [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version", "syn 2.0.117", "unicode-xid", ] @@ -2622,12 +2649,14 @@ dependencies = [ [[package]] name = "expander" -version = "2.0.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" +checksum = "e2c470c71d91ecbd179935b24170459e926382eaaa86b590b78814e180d8a8e2" dependencies = [ "blake2 0.10.6", + "file-guard", "fs-err", + "prettyplease", "proc-macro2", "quote", "syn 2.0.117", @@ -2676,6 +2705,16 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "file-guard" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ef72acf95ec3d7dbf61275be556299490a245f017cf084bd23b4f68cf9407c" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "finality-grandpa" version = "0.16.3" @@ -2736,7 +2775,7 @@ checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "fork-tree" version = "13.0.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", ] @@ -2763,7 +2802,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support", "frame-support-procedural", @@ -2785,7 +2824,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support", "frame-system", @@ -2801,7 +2840,7 @@ dependencies = [ [[package]] name = "frame-support" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "environmental", @@ -2830,7 +2869,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "37.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "Inflector", "cfg-expr", @@ -2849,7 +2888,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "13.0.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.5.0", @@ -2861,7 +2900,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "12.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "proc-macro2", "quote", @@ -2871,7 +2910,7 @@ dependencies = [ [[package]] name = "frame-system" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "cfg-if", "frame-support", @@ -2888,7 +2927,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.52.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support", "parity-scale-codec", @@ -3184,7 +3223,7 @@ checksum = "19e16c5073773ccf057c282be832a59ee53ef5ff98db3aeff7f8314f52ffc196" dependencies = [ "fnv", "hashbrown 0.16.1", - "indexmap 2.13.1", + "indexmap 2.14.0", "stable_deref_trait", ] @@ -3240,7 +3279,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.13.1", + "indexmap 2.14.0", "slab", "tokio", "tokio-util", @@ -3253,15 +3292,6 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e7d7786361d7425ae2fe4f9e407eb0efaa0840f5212d109cc018c40c35c6ab4" -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - [[package]] name = "hashbrown" version = "0.13.2" @@ -3301,6 +3331,12 @@ dependencies = [ "serde_core", ] +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + [[package]] name = "hashlink" version = "0.9.1" @@ -3606,6 +3642,88 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" +dependencies = [ + "displaydoc", + "potential_utf", + "utf8_iter", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" + +[[package]] +name = "icu_properties" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" + +[[package]] +name = "icu_provider" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -3625,22 +3743,12 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279259b0ac81c89d11c290495fdcfa96ea3643b7df311c138b6fe8ca5237f0f8" -dependencies = [ - "idna_mapping", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna_mapping" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c13906586a4b339310541a274dd927aff6fcbb5b8e3af90634c4b31681c792" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "unicode-joining-type", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -3732,12 +3840,12 @@ version = "1.99.99" [[package]] name = "indexmap" -version = "2.13.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a8a2b9cb3e0b0c1803dbb0758ffac5de2f425b23c28f518faabd9d805342ff" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" dependencies = [ "equivalent", - "hashbrown 0.16.1", + "hashbrown 0.17.0", "serde", "serde_core", ] @@ -3812,9 +3920,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.94" +version = "0.3.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" +checksum = "2964e92d1d9dc3364cae4d718d93f227e3abb088e747d92e0395bfdedf1c12ca" dependencies = [ "once_cell", "wasm-bindgen", @@ -4434,9 +4542,9 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" +checksum = "e02f3bb43d335493c96bf3fd3a321600bf6bd07ed34bc64118e9293bdffea46c" dependencies = [ "libc", ] @@ -4487,6 +4595,12 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" +[[package]] +name = "litemap" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" + [[package]] name = "lock_api" version = "0.4.14" @@ -4604,6 +4718,17 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "match-lookup" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "matchers" version = "0.2.0" @@ -4978,11 +5103,12 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" dependencies = [ "base-x", + "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -5251,7 +5377,7 @@ checksum = "271638cd5fa9cca89c4c304675ca658efc4e64a66c716b7cfe1afb4b9611dbbc" dependencies = [ "crc32fast", "hashbrown 0.16.1", - "indexmap 2.13.1", + "indexmap 2.14.0", "memchr", ] @@ -5304,7 +5430,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support", "frame-system", @@ -5315,7 +5441,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-benchmarking", "frame-support", @@ -5336,7 +5462,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-benchmarking", "frame-support", @@ -5356,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-support", "frame-system", @@ -5367,7 +5493,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "frame-benchmarking", "frame-support", @@ -5501,7 +5627,7 @@ checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", "hashbrown 0.15.5", - "indexmap 2.13.1", + "indexmap 2.14.0", ] [[package]] @@ -5607,6 +5733,15 @@ dependencies = [ "serde", ] +[[package]] +name = "potential_utf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -5852,9 +5987,9 @@ dependencies = [ [[package]] name = "pulley-interpreter" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7975f0975fa2c047bf47d617bdf716689e42ee82b159bd000ead7330d7697a1b" +checksum = "010dec3755eb61b2f1051ecb3611b718460b7a74c131e474de2af20a845938af" dependencies = [ "cranelift-bitset", "log", @@ -5864,9 +5999,9 @@ dependencies = [ [[package]] name = "pulley-macros" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210c0386ef0ddedb337ec99b91e560ae9c341415ef75958cb39ddb537bb0c84" +checksum = "ad360c32e85ca4b083ac0e2b6856e8f11c3d5060dafa7d5dc57b370857fa3018" dependencies = [ "proc-macro2", "quote", @@ -5969,7 +6104,7 @@ dependencies = [ "once_cell", "socket2 0.6.3", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -6577,9 +6712,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "20a6af516fea4b20eccceaf166e8aa666ac996208e8a644ce3ef5aa783bc7cd4" dependencies = [ "ring", "rustls-pki-types", @@ -6625,7 +6760,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "36.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "log", "sp-core", @@ -6636,7 +6771,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.56.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", @@ -6666,7 +6801,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.54.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "futures", "log", @@ -6687,7 +6822,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.49.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "sp-api", @@ -6702,7 +6837,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "49.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6722,7 +6857,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "fnv", "futures", @@ -6748,7 +6883,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.52.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "hash-db", "kvdb", @@ -6775,7 +6910,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", @@ -6797,7 +6932,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.56.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "fork-tree", @@ -6834,7 +6969,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6847,7 +6982,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.41.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "ahash", "array-bytes", @@ -6891,7 +7026,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", @@ -6914,7 +7049,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.48.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "parking_lot", @@ -6933,7 +7068,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.44.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "sc-allocator", "sp-wasm-interface", @@ -6943,7 +7078,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.44.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "log", "parking_lot", @@ -6958,7 +7093,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "console", "futures", @@ -6974,7 +7109,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "40.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "parking_lot", @@ -6988,7 +7123,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.56.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "async-channel", @@ -7032,7 +7167,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.53.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bitflags 1.3.2", "parity-scale-codec", @@ -7042,7 +7177,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.56.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "ahash", "futures", @@ -7061,7 +7196,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "async-channel", @@ -7094,7 +7229,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "futures", @@ -7113,7 +7248,7 @@ dependencies = [ [[package]] name = "sc-network-types" version = "0.20.2" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bs58", "bytes", @@ -7134,7 +7269,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.20.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7143,7 +7278,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "51.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "futures", "jsonrpsee", @@ -7162,7 +7297,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.55.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -7174,7 +7309,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "28.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "dyn-clone", "forwarded-header-value", @@ -7198,7 +7333,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.57.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "directories", @@ -7258,7 +7393,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.42.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "log", "parity-scale-codec", @@ -7268,7 +7403,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "47.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "derive_more 1.0.0", "futures", @@ -7288,7 +7423,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "30.0.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "chrono", "futures", @@ -7307,7 +7442,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "chrono", "console", @@ -7334,7 +7469,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "11.1.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "proc-macro-crate 3.5.0", "proc-macro2", @@ -7345,12 +7480,12 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", "futures-timer", - "indexmap 2.13.1", + "indexmap 2.14.0", "itertools 0.14.0", "linked-hash-map", "parity-scale-codec", @@ -7376,11 +7511,11 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "44.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", - "indexmap 2.13.1", + "indexmap 2.14.0", "log", "parity-scale-codec", "serde", @@ -7394,7 +7529,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "20.1.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-channel", "futures", @@ -7806,15 +7941,21 @@ dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group", "dkg", - "log", + "env_logger", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", "schnorr-signatures", "serai-coordinator-substrate", "serai-cosign-types", "serai-db", + "serai-env", "serai-primitives", "serai-processor-messages", + "serai-substrate-tests", "serai-task", + "tendermint-machine", + "tokio", "tributary-sdk", "zeroize", ] @@ -8743,7 +8884,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.99.99", - "indexmap 2.13.1", + "indexmap 2.14.0", "schemars 0.9.99", "schemars 1.99.99", "serde_core", @@ -8980,7 +9121,7 @@ dependencies = [ [[package]] name = "sp-api" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "hash-db", "log", @@ -8999,7 +9140,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "27.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "Inflector", "blake2 0.11.0-rc.5", @@ -9013,7 +9154,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "serde", @@ -9024,7 +9165,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "28.0.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "num-traits", "parity-scale-codec", @@ -9035,7 +9176,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "sp-api", @@ -9045,7 +9186,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "sp-api", "sp-inherents", @@ -9055,7 +9196,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "44.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "futures", "parity-scale-codec", @@ -9074,7 +9215,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.47.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "futures", @@ -9090,7 +9231,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.47.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "parity-scale-codec", @@ -9107,7 +9248,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "28.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "finality-grandpa", "log", @@ -9123,7 +9264,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.47.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "serde", @@ -9133,7 +9274,7 @@ dependencies = [ [[package]] name = "sp-core" version = "40.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "array-bytes", "bip39", @@ -9142,9 +9283,9 @@ dependencies = [ "bounded-collections", "bs58", "dyn-clone", + "fnv", "futures", "hash-db", - "hash256-std-hasher", "hex", "impl-codec", "impl-serde", @@ -9170,7 +9311,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.1.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "blake2 0.11.0-rc.5", "byteorder", @@ -9184,7 +9325,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing-proc-macro" version = "0.1.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "quote", "sp-crypto-hashing", @@ -9194,7 +9335,7 @@ dependencies = [ [[package]] name = "sp-database" version = "10.0.1" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "kvdb", "kvdb-rocksdb", @@ -9204,7 +9345,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.32.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "environmental", "parity-scale-codec", @@ -9214,7 +9355,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -9226,7 +9367,7 @@ dependencies = [ [[package]] name = "sp-io" version = "45.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bytes", "log", @@ -9247,7 +9388,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "sp-core", "sp-runtime", @@ -9257,7 +9398,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.46.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "parking_lot", @@ -9268,7 +9409,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "38.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "rustc-hash", "serde", @@ -9277,11 +9418,11 @@ dependencies = [ [[package]] name = "sp-runtime" version = "46.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bytes", "either", - "hash256-std-hasher", + "fnv", "impl-trait-for-tuples", "log", "parity-scale-codec", @@ -9301,7 +9442,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "34.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bytes", "parity-scale-codec", @@ -9314,7 +9455,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "21.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "Inflector", "expander", @@ -9327,7 +9468,7 @@ dependencies = [ [[package]] name = "sp-session" version = "43.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "sp-api", @@ -9339,7 +9480,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "43.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "sp-core", @@ -9349,7 +9490,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.50.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "hash-db", "log", @@ -9368,7 +9509,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "23.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9379,7 +9520,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "async-trait", "parity-scale-codec", @@ -9390,7 +9531,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "19.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "regex", @@ -9402,7 +9543,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "41.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "sp-api", "sp-runtime", @@ -9411,7 +9552,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "43.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "ahash", "foldhash 0.1.5", @@ -9434,7 +9575,7 @@ dependencies = [ [[package]] name = "sp-version" version = "44.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9448,7 +9589,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "15.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "parity-scale-codec", "proc-macro-warning", @@ -9460,7 +9601,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "24.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "impl-trait-for-tuples", "log", @@ -9471,7 +9612,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "34.0.0" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -9616,7 +9757,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.7" -source = "git+https://github.com/serai-dex/patch-polkadot-sdk#b5eceab27ce23d84d13746ca44f2a07431b9d1e8" +source = "git+https://github.com/serai-dex/patch-polkadot-sdk#ae1ac09ea152666328a4351ee2dbcfbda7f3fc69" dependencies = [ "hyper", "log", @@ -9733,7 +9874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.4.2", + "getrandom 0.3.99", "once_cell", "rustix", "windows-sys 0.61.2", @@ -9839,6 +9980,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.11.0" @@ -9955,7 +10106,7 @@ version = "0.25.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b59c4d22ed448339746c59b905d24568fcbb3ab65a500494f7b8c3e97739f2b" dependencies = [ - "indexmap 2.13.1", + "indexmap 2.14.0", "toml_datetime", "toml_parser", "winnow 1.0.1", @@ -10194,24 +10345,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicode-bidi" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" - [[package]] name = "unicode-ident" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" -[[package]] -name = "unicode-joining-type" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8d00a78170970967fdb83f9d49b92f959ab2bb829186b113e4f4604ad98e180" - [[package]] name = "unicode-normalization" version = "0.1.25" @@ -10221,6 +10360,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" + [[package]] name = "unicode-width" version = "0.2.2" @@ -10347,9 +10492,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.117" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0" +checksum = "0bf938a0bacb0469e83c1e148908bd7d5a6010354cf4fb73279b7447422e3a89" dependencies = [ "cfg-if", "once_cell", @@ -10360,9 +10505,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.117" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be" +checksum = "eeff24f84126c0ec2db7a449f0c2ec963c6a49efe0698c4242929da037ca28ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10370,9 +10515,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.117" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2" +checksum = "9d08065faf983b2b80a79fd87d8254c409281cf7de75fc4b773019824196c904" dependencies = [ "bumpalo", "proc-macro2", @@ -10383,9 +10528,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.117" +version = "0.2.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b" +checksum = "5fd04d9e306f1907bd13c6361b5c6bfc7b3b3c095ed3f8a9246390f8dbdee129" dependencies = [ "unicode-ident", ] @@ -10408,7 +10553,7 @@ checksum = "4f08c9adee0428b7bddf3890fc27e015ac4b761cc608c822667102b8bfd6995e" dependencies = [ "bitflags 2.11.0", "hashbrown 0.16.1", - "indexmap 2.13.1", + "indexmap 2.14.0", "semver", "serde", ] @@ -10426,9 +10571,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54fa9f298901a64ed3eae16b130f0b30c80dbb74a9e7f129a791f4e74649b917" +checksum = "ce205cd643d661b5ba5ba4717e13730262e8cdbc8f2eacbc7b906d45c1a74026" dependencies = [ "addr2line", "async-trait", @@ -10465,9 +10610,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a3aaaa3a522f443af67a7ed8d4efa20b0c3784e1031980537fbfcb497f70a7" +checksum = "0b8b78abf3677d4a0a5db82e5015b4d085ff3a1b8b472cbb8c70d4b769f019ce" dependencies = [ "anyhow", "cranelift-bforest", @@ -10475,7 +10620,7 @@ dependencies = [ "cranelift-entity", "gimli", "hashbrown 0.16.1", - "indexmap 2.13.1", + "indexmap 2.14.0", "log", "object", "postcard", @@ -10492,9 +10637,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-core" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e671917bb6856ae360cb59d7aaf26f1cfd042c7b924319dd06fd380739fc0b2e" +checksum = "22632b187e1b0716f1b9ac57ad29013bed33175fcb19e10bb6896126f82fac67" dependencies = [ "hashbrown 0.16.1", "libm", @@ -10503,9 +10648,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-cranelift" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dfd752e1dcf79eeeadc6f2681e2fb4a9f0b5534d18c5b9b93faccd0de2c80c" +checksum = "8b3ca07b3e0bb3429674b173b5800577719d600774dd81bff58f775c0aaa64ee" dependencies = [ "cfg-if", "cranelift-codegen", @@ -10530,9 +10675,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-fiber" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e9171af643316c11d6ebe52f31f6e2a5d6d1d270de9167a7b7b6f0e3f72982" +checksum = "20c8b2c9704eb1f33ead025ec16038277ccb63d0a14c31e99d5b765d7c36da55" dependencies = [ "cc", "cfg-if", @@ -10545,9 +10690,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-jit-debug" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe23134536b9883ffc2afcffae23f7ffbcb1791e2d9fac6d6464a37ea4c8fdd" +checksum = "d950310d07391d34369f62c48336ebb14eacbd4d6f772bb5f349c24e838e0664" dependencies = [ "cc", "wasmtime-internal-versioned-export-macros", @@ -10555,9 +10700,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-jit-icache-coherence" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3112806515fac8495883885eb8dbdde849988ae91fe6beb544c0d7c0f4c9aa" +checksum = "3606662c156962d096be3127b8b8ae8ee2f8be3f896dad29259ff01ddb64abfd" dependencies = [ "cfg-if", "libc", @@ -10567,9 +10712,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-unwinder" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dafc29c6e538273fda8409335137654751bdf24beab65702b7866b0a85ee108a" +checksum = "75eef0747e52dc545b075f64fd0e0cc237ae738e641266b1970e07e2d744bc32" dependencies = [ "cfg-if", "cranelift-codegen", @@ -10580,9 +10725,9 @@ dependencies = [ [[package]] name = "wasmtime-internal-versioned-export-macros" -version = "43.0.0" +version = "43.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772f2b105b7fdd3dfb2cdf70c297baaeb96fe76a95cdc6fa516f713f04090c73" +checksum = "d8b0a5dab02a8fb527f547855ecc0e05f9fdc3d5bd57b8b080349408f9a6cece" dependencies = [ "proc-macro2", "quote", @@ -10605,9 +10750,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.94" +version = "0.3.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a" +checksum = "4f2dfbb17949fa2088e5d39408c48368947b86f7834484e87b73de55bc14d97d" dependencies = [ "js-sys", "wasm-bindgen", @@ -10796,7 +10941,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", ] [[package]] @@ -10814,14 +10968,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -10839,48 +11010,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "winnow" version = "0.7.15" @@ -10912,6 +11131,12 @@ dependencies = [ name = "wit-bindgen-rust-macro" version = "0.51.99" +[[package]] +name = "writeable" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" + [[package]] name = "wyz" version = "0.5.1" @@ -11005,6 +11230,29 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure 0.13.2", +] + [[package]] name = "zalloc" version = "0.1.0" @@ -11033,6 +11281,27 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "zerofrom" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure 0.13.2", +] + [[package]] name = "zeroize" version = "1.8.2" @@ -11053,6 +11322,39 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "zerotrie" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "zmij" version = "1.0.21" diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 92b50a556..d00772b27 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -482,7 +482,8 @@ impl TributaryDb { // Accumulate the data accumulated_weight = accumulated_weight.checked_add(validator_weight).expect(&format!( - "accumulated_weight {accumulated_weight} overflowed adding validator_weight {validator_weight}" + "accumulated_weight {} overflowed adding validator_weight {}", + accumulated_weight, validator_weight )); AccumulatedWeight::set(txn, set, topic, &accumulated_weight); Accumulated::set(txn, set, topic, validator, data); diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 6e1cea134..3956fd068 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -275,7 +275,7 @@ mod transaction { } } - /// Regression test: `Transaction::read` must use `deserialize_reader`, not `borsh::from_reader`. + /// Regression test: `Transaction::read` must use `deserialize_reader`, not `borsh::from_reader` /// /// `borsh::from_reader` asserts the reader is exhausted after deserialization. When multiple /// transactions are serialized into a single stream (as happens in `Block::read`), the first diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 047dee135..2776f1980 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -28,7 +28,7 @@ pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { /// Generate a random `Vec` with a random length between 1 and 128. pub fn random_vec_u8(rng: &mut R) -> Vec { - let len = (rng.next_u32() % 128) as usize + 1; + let len = usize::try_from(rng.next_u32() % 128).unwrap() + 1; random_vec_of_len(rng, len) } @@ -109,7 +109,7 @@ pub fn random_block_number(rng: &mut R) -> u64 { /// Generate a random [`ExternalNetworkId`]. pub fn random_external_network_id(rng: &mut R) -> ExternalNetworkId { let all: Vec<_> = ExternalNetworkId::all().collect(); - all[(rng.next_u32() as usize) % all.len()] + all[usize::try_from(rng.next_u32()).unwrap() % all.len()] } /// Generate a random [`ExternalValidatorSet`]. From 9b9ec8d4ffde9c2b334d89245839e27808866226 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 10:22:00 -0400 Subject: [PATCH 52/71] chore(coordinator/tributary): fix match same arms --- coordinator/tributary/src/db.rs | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index d00772b27..446cdd945 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -61,16 +61,17 @@ impl Topic { pub(crate) fn next_attempt_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { + Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { attempt: attempt.checked_add(1)?, round: SigningProtocolRound::Preprocess, }), + Topic::SlashReport => None, Topic::Sign { id, attempt, round: _ } => Some(Topic::Sign { id, attempt: attempt.checked_add(1)?, round: SigningProtocolRound::Preprocess, }), - Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -78,6 +79,7 @@ impl Topic { pub(crate) fn reattempt_topic(self) -> Option<(u32, Topic)> { #[expect(clippy::match_same_arms)] match self { + Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { let next_attempt = attempt.checked_add(1)?; @@ -91,6 +93,7 @@ impl Topic { } SigningProtocolRound::Share => None, }, + Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { let next_attempt = attempt.checked_add(1)?; @@ -101,7 +104,6 @@ impl Topic { } SigningProtocolRound::Share => None, }, - Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -111,7 +113,9 @@ impl Topic { pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } | Topic::DkgConfirmation { .. } | Topic::SlashReport => None, + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { .. } => None, + Topic::SlashReport => None, Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }), } } @@ -128,6 +132,7 @@ impl Topic { ) -> Option { #[expect(clippy::match_same_arms)] match self { + Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round: _ } => Some({ let id = { let mut id = [0; 32]; @@ -137,7 +142,8 @@ impl Topic { }; SignId { session: set.session, id, attempt } }), - Topic::RemoveParticipant { .. } | Topic::SlashReport | Topic::Sign { .. } => None, + Topic::SlashReport => None, + Topic::Sign { .. } => None, } } @@ -147,19 +153,20 @@ impl Topic { pub(crate) fn preceding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { + Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => None, SigningProtocolRound::Share => { Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) } }, + Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => None, SigningProtocolRound::Share => { Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) } }, - Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -169,19 +176,20 @@ impl Topic { pub(crate) fn succeeding_topic(self) -> Option { #[expect(clippy::match_same_arms)] match self { + Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) } SigningProtocolRound::Share => None, }, + Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) } SigningProtocolRound::Share => None, }, - Topic::RemoveParticipant { .. } | Topic::SlashReport => None, } } @@ -203,8 +211,10 @@ impl Topic { pub(crate) fn participating(&self) -> Participating { #[expect(clippy::match_same_arms)] match self { - Topic::RemoveParticipant { .. } | Topic::SlashReport => Participating::Everyone, - Topic::DkgConfirmation { .. } | Topic::Sign { .. } => Participating::Participated, + Topic::RemoveParticipant { .. } => Participating::Everyone, + Topic::DkgConfirmation { .. } => Participating::Participated, + Topic::SlashReport => Participating::Everyone, + Topic::Sign { .. } => Participating::Participated, } } } From 1b669c9a63966ebd74149860de6fcdf298cb4aab Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 10:26:10 -0400 Subject: [PATCH 53/71] chore: lock --- Cargo.lock | 120 ++++++++++++++++++++++++++++------------------------- 1 file changed, 64 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d18a96bd4..ca605c126 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -86,9 +86,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e9e31d834fe25fe991b8884e4b9f0e59db4a97d86e05d1464d6899c013cd62" +checksum = "84e0378e959aa6a885897522080a990e80eb317f1e9a222a604492ea50e13096" dependencies = [ "alloy-primitives", "num_enum", @@ -343,7 +343,7 @@ dependencies = [ "keccak-asm", "paste", "proptest", - "rand 0.9.2", + "rand 0.9.4", "rapidhash", "ruint", "rustc-hash", @@ -1071,7 +1071,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1186,9 +1186,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" [[package]] name = "bitvec" @@ -3217,9 +3217,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.33.1" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e16c5073773ccf057c282be832a59ee53ef5ff98db3aeff7f8314f52ffc196" +checksum = "0bf7f043f89559805f8c7cacc432749b2fa0d0a0a9ee46ce47164ed5ba7f126c" dependencies = [ "fnv", "hashbrown 0.16.1", @@ -3250,7 +3250,7 @@ dependencies = [ "parking_lot", "portable-atomic", "quanta", - "rand 0.9.2", + "rand 0.9.4", "smallvec", "spinning_top", "web-time", @@ -3419,7 +3419,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.2", + "rand 0.9.4", "ring", "socket2 0.5.10", "thiserror 2.0.18", @@ -3441,7 +3441,7 @@ dependencies = [ "lru-slab", "once_cell", "parking_lot", - "rand 0.9.2", + "rand 0.9.4", "resolv-conf", "smallvec", "thiserror 2.0.18", @@ -3570,16 +3570,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.7" +version = "0.27.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +checksum = "33ca68d021ef39cf6463ab54c1d0f5daf03377b70561305bb89a8f83aab66e0f" dependencies = [ "http", "hyper", "hyper-util", "rustls", "rustls-native-certs", - "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", @@ -3802,7 +3801,7 @@ dependencies = [ "hyper", "hyper-util", "log", - "rand 0.9.2", + "rand 0.9.4", "tokio", "url", "xmltree", @@ -4103,9 +4102,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.184" +version = "0.2.185" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" +checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f" [[package]] name = "libm" @@ -4183,7 +4182,7 @@ dependencies = [ "futures-timer", "libp2p-identity", "multiaddr 0.18.2", - "multihash 0.19.3", + "multihash 0.19.4", "multistream-select", "parking_lot", "pin-project", @@ -4272,7 +4271,7 @@ dependencies = [ "bs58", "ed25519-dalek", "hkdf", - "multihash 0.19.3", + "multihash 0.19.4", "quick-protobuf", "rand 0.8.5", "sha2 0.10.9", @@ -4358,7 +4357,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "multiaddr 0.18.2", - "multihash 0.19.3", + "multihash 0.19.4", "quick-protobuf", "rand 0.8.5", "snow", @@ -4621,9 +4620,9 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "lru" -version = "0.16.3" +version = "0.16.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" +checksum = "7f66e8d5d03f609abc3a39e6f08e4164ebf1447a732906d39eb9b99b7919ef39" dependencies = [ "hashbrown 0.16.1", ] @@ -5096,7 +5095,7 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.3", + "multihash 0.19.4", "percent-encoding", "serde", "static_assertions", @@ -5144,11 +5143,11 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.3" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" +checksum = "89ace881e3f514092ce9efbcb8f413d0ad9763860b828981c2de51ddc666936c" dependencies = [ - "core2", + "no_std_io2", "unsigned-varint 0.8.0", ] @@ -5201,7 +5200,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ce3636fa715e988114552619582b530481fd5ef176a1e5c1bf024077c2c9445" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "libc", "log", "netlink-packet-core", @@ -5240,12 +5239,21 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "cfg-if", "cfg_aliases", "libc", ] +[[package]] +name = "no_std_io2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a3564ce7035b1e4778d8cb6cacebb5d766b5e8fe5a75b9e441e33fb61a872c6" +dependencies = [ + "memchr", +] + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5523,7 +5531,7 @@ dependencies = [ "lz4", "memmap2", "parking_lot", - "rand 0.9.2", + "rand 0.9.4", "siphasher", "snap", "winapi", @@ -5677,9 +5685,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.32" +version = "0.3.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +checksum = "19f132c84eca552bf34cab8ec81f1c1dcc229b811638f9d283dceabe58c5569e" [[package]] name = "polling" @@ -5926,9 +5934,9 @@ checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.11.0", + "bitflags 2.11.1", "num-traits", - "rand 0.9.2", + "rand 0.9.4", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax", @@ -6084,7 +6092,7 @@ dependencies = [ "bytes", "getrandom 0.3.99", "lru-slab", - "rand 0.9.2", + "rand 0.9.4", "ring", "rustc-hash", "rustls", @@ -6145,9 +6153,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.5", @@ -6231,14 +6239,14 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", ] [[package]] name = "rayon" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d" dependencies = [ "either", "rayon-core", @@ -6273,7 +6281,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", ] [[package]] @@ -6527,7 +6535,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e480426a7d76b458789e4a1be3ffbce9df798f0145f0520c1cdf967755cfcbf" dependencies = [ "alloy-eip7928", - "bitflags 2.11.0", + "bitflags 2.11.1", "revm-bytecode", "revm-primitives", ] @@ -6623,7 +6631,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.2", + "rand 0.9.4", "rlp", "ruint-macro", "serde_core", @@ -6671,7 +6679,7 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "errno", "libc", "linux-raw-sys", @@ -6680,9 +6688,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.37" +version = "0.23.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" +checksum = "69f9466fb2c14ea04357e91413efb882e2a6d4a406e625449bc0a5d360d53a21" dependencies = [ "once_cell", "ring", @@ -6716,9 +6724,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.11" +version = "0.103.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20a6af516fea4b20eccceaf166e8aa666ac996208e8a644ce3ef5aa783bc7cd4" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ "ring", "rustls-pki-types", @@ -7263,7 +7271,7 @@ dependencies = [ "multiaddr 0.17.1", "multiaddr 0.18.2", "multihash 0.17.0", - "multihash 0.19.3", + "multihash 0.19.4", "rand 0.8.5", "serde_with", "thiserror 2.0.18", @@ -7707,7 +7715,7 @@ version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -9854,7 +9862,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -10021,9 +10029,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.51.1" +version = "1.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f66bf9585cda4b724d3e78ab34b73fb2bbaba9011b9bfdf69dc836382ea13b8c" +checksum = "a91135f59b1cbf38c91e73cf3386fca9bb77915c45ce2771460c9d92f0f3d776" dependencies = [ "bytes", "libc", @@ -10170,7 +10178,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "bytes", "http", "http-body", @@ -10565,7 +10573,7 @@ version = "0.245.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f08c9adee0428b7bddf3890fc27e015ac4b761cc608c822667102b8bfd6995e" dependencies = [ - "bitflags 2.11.0", + "bitflags 2.11.1", "hashbrown 0.16.1", "indexmap 2.14.0", "semver", @@ -10591,7 +10599,7 @@ checksum = "ce205cd643d661b5ba5ba4717e13730262e8cdbc8f2eacbc7b906d45c1a74026" dependencies = [ "addr2line", "async-trait", - "bitflags 2.11.0", + "bitflags 2.11.1", "bumpalo", "cc", "cfg-if", @@ -11230,7 +11238,7 @@ dependencies = [ "nohash-hasher", "parking_lot", "pin-project", - "rand 0.9.2", + "rand 0.9.4", "static_assertions", "web-time", ] From 3970ccc0f05a8d8fcb7801e17e8c59f5bc1ab654 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 11:21:37 -0400 Subject: [PATCH 54/71] chore(coordinator/tributary): clippy errors --- coordinator/tributary/src/db.rs | 11 ++++++++--- coordinator/tributary/src/transaction.rs | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 446cdd945..0bf743e25 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -53,7 +53,10 @@ pub(crate) enum Participating { pub(crate) fn required_participation(n: u16) -> u16 { // All of our topics require 2/3rds participation - n.checked_mul(2).expect(&format!("required_participation overflowed: {n} * 2")) / 3 + 1 + #[expect(clippy::expect_fun_call)] + n.checked_mul(2).expect(&format!("required_participation overflowed: {n} * 2")) / + 3 + + 1 } impl Topic { @@ -413,9 +416,9 @@ impl TributaryDb { txn: &mut impl DbTxn, set: ExternalValidatorSet, validator: SeraiAddress, - _reason: &str, + #[cfg_attr(coverage, allow(unused_variables))] reason: &str, ) { - serai_env::warn!("{validator} fatally slashed: {_reason}"); + serai_env::warn!("{validator} fatally slashed: {reason}"); SlashPoints::set(txn, set, validator, &u32::MAX); } @@ -491,6 +494,7 @@ impl TributaryDb { } // Accumulate the data + #[expect(clippy::expect_fun_call)] accumulated_weight = accumulated_weight.checked_add(validator_weight).expect(&format!( "accumulated_weight {} overflowed adding validator_weight {}", accumulated_weight, validator_weight @@ -506,6 +510,7 @@ impl TributaryDb { // Linearly scale the time for the protocol with the attempt number let blocks_till_reattempt = u64::from(attempt) * u64::from(BASE_REATTEMPT_DELAY); + #[expect(clippy::expect_fun_call)] let recognize_at = block_number.checked_add(blocks_till_reattempt).expect(&format!( "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}" )); diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 03d22609f..51c23b2a0 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -252,6 +252,7 @@ impl ReadWrite for Transaction { impl TransactionTrait for Transaction { fn kind(&self) -> TransactionKind { + #[expect(clippy::match_same_arms)] match self { Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed( borsh::to_vec(&(b"RemoveParticipant".as_slice(), participant)).unwrap(), From accd14449eb6729216f038370c9f0389db6a237b Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 13:46:00 -0400 Subject: [PATCH 55/71] chore(coordinator/tributary): clippy errors --- coordinator/tributary/src/db.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 0bf743e25..24848831b 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -53,10 +53,7 @@ pub(crate) enum Participating { pub(crate) fn required_participation(n: u16) -> u16 { // All of our topics require 2/3rds participation - #[expect(clippy::expect_fun_call)] - n.checked_mul(2).expect(&format!("required_participation overflowed: {n} * 2")) / - 3 + - 1 + n.checked_mul(2).unwrap_or_else(|| panic!("required_participation overflowed: {} * 2", n)) / 3 + 1 } impl Topic { @@ -494,11 +491,12 @@ impl TributaryDb { } // Accumulate the data - #[expect(clippy::expect_fun_call)] - accumulated_weight = accumulated_weight.checked_add(validator_weight).expect(&format!( - "accumulated_weight {} overflowed adding validator_weight {}", - accumulated_weight, validator_weight - )); + accumulated_weight = accumulated_weight.checked_add(validator_weight).unwrap_or_else(|| { + panic!( + "accumulated_weight {} overflowed adding validator_weight {}", + accumulated_weight, validator_weight + ) + }); AccumulatedWeight::set(txn, set, topic, &accumulated_weight); Accumulated::set(txn, set, topic, validator, data); @@ -511,9 +509,12 @@ impl TributaryDb { let blocks_till_reattempt = u64::from(attempt) * u64::from(BASE_REATTEMPT_DELAY); #[expect(clippy::expect_fun_call)] - let recognize_at = block_number.checked_add(blocks_till_reattempt).expect(&format!( - "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}" - )); + let recognize_at = block_number.checked_add(blocks_till_reattempt).unwrap_or_else(|| { + panic!( + "recognize_at overflowed: block_number {} + delay {}", + block_number, blocks_till_reattempt + ); + }); let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1)); queued.push(reattempt_topic); Reattempt::set(txn, set, recognize_at, &queued); From febf688e32bd75f595b34a302ec223ed17aac629 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 14:10:34 -0400 Subject: [PATCH 56/71] chore(coordinator/tributary): fix panics --- coordinator/tributary/src/db.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 24848831b..da55be74c 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -53,7 +53,7 @@ pub(crate) enum Participating { pub(crate) fn required_participation(n: u16) -> u16 { // All of our topics require 2/3rds participation - n.checked_mul(2).unwrap_or_else(|| panic!("required_participation overflowed: {} * 2", n)) / 3 + 1 + n.checked_mul(2).unwrap_or_else(|| panic!("required_participation overflowed: {n} * 2")) / 3 + 1 } impl Topic { @@ -493,8 +493,7 @@ impl TributaryDb { // Accumulate the data accumulated_weight = accumulated_weight.checked_add(validator_weight).unwrap_or_else(|| { panic!( - "accumulated_weight {} overflowed adding validator_weight {}", - accumulated_weight, validator_weight + "accumulated_weight {accumulated_weight} overflowed adding validator_weight {validator_weight}", ) }); AccumulatedWeight::set(txn, set, topic, &accumulated_weight); @@ -511,8 +510,7 @@ impl TributaryDb { #[expect(clippy::expect_fun_call)] let recognize_at = block_number.checked_add(blocks_till_reattempt).unwrap_or_else(|| { panic!( - "recognize_at overflowed: block_number {} + delay {}", - block_number, blocks_till_reattempt + "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}", ); }); let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1)); From f799da7ddddc52e37d603b35240566c7a20af3df Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 14:11:08 -0400 Subject: [PATCH 57/71] chore(coordinator/tributary): clippy errors --- coordinator/tributary/src/db.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index da55be74c..45c17de1f 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -507,7 +507,6 @@ impl TributaryDb { // Linearly scale the time for the protocol with the attempt number let blocks_till_reattempt = u64::from(attempt) * u64::from(BASE_REATTEMPT_DELAY); - #[expect(clippy::expect_fun_call)] let recognize_at = block_number.checked_add(blocks_till_reattempt).unwrap_or_else(|| { panic!( "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}", From 75a1cf74de9701baff6c66c9d1ed7e3191816058 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 15:09:10 -0400 Subject: [PATCH 58/71] chore(coordinator/tributary): clippy errors --- coordinator/tributary/src/tests/db.rs | 120 +++++++++++------- coordinator/tributary/src/tests/mod.rs | 39 +++--- coordinator/tributary/src/tests/scan_block.rs | 55 ++++---- .../tributary/src/tests/scan_tributary.rs | 26 ++-- .../tributary/src/tests/transaction.rs | 76 +++++++---- coordinator/tributary/src/tests/tributary.rs | 12 +- 6 files changed, 194 insertions(+), 134 deletions(-) diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 1bb02db71..a0f115fd8 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,7 +1,7 @@ -use rand::{Rng, RngCore, rngs::OsRng}; +use rand::{Rng as _, RngCore as _, rngs::OsRng}; use messages::sign::{SignId, VariantSignId}; -use serai_db::{Db, DbTxn, MemDb}; +use serai_db::{Db as _, DbTxn, MemDb}; use serai_primitives::{ address::SeraiAddress, validator_sets::ExternalValidatorSet, @@ -95,6 +95,7 @@ fn all_preprocess_topics_and_attempts() -> Vec { type NoEachFn = fn(usize, &DataSet); /// Cross threshold by accumulating from all validators, returning the final result. +#[expect(clippy::too_many_arguments)] fn accumulate_to_threshold( txn: &mut impl DbTxn, set: ExternalValidatorSet, @@ -140,7 +141,7 @@ mod required_participation_tests { // No panics { - let random_n = (OsRng.next_u32() as u16) % (u16::MAX / 2); + let random_n = OsRng.gen_range(0 .. u16::MAX / 2); let _ = required_participation(random_n); let _ = required_participation(u16::MAX / 2); } @@ -182,7 +183,9 @@ mod topic { round: SigningProtocolRound::Preprocess }) ), - _ => assert_eq!(topic.next_attempt_topic(), None), + Topic::RemoveParticipant { .. } | Topic::SlashReport => { + assert_eq!(topic.next_attempt_topic(), None); + } } } } @@ -212,7 +215,9 @@ mod topic { ), SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, - _ => assert_eq!(topic.reattempt_topic(), None), + Topic::RemoveParticipant { .. } | Topic::SlashReport => { + assert_eq!(topic.reattempt_topic(), None); + } } } } @@ -223,9 +228,11 @@ mod topic { for topic in all_topics_and_attempts() { match topic { Topic::Sign { id, attempt, round: _ } => { - assert_eq!(topic.sign_id(set), Some(SignId { session: set.session, id, attempt })) + assert_eq!(topic.sign_id(set), Some(SignId { session: set.session, id, attempt })); + } + Topic::RemoveParticipant { .. } | Topic::DkgConfirmation { .. } | Topic::SlashReport => { + assert_eq!(topic.sign_id(set), None); } - _ => assert_eq!(topic.sign_id(set), None), } } } @@ -235,19 +242,23 @@ mod topic { let set = random_validator_set(&mut OsRng); for topic in all_topics_and_attempts() { match topic { - Topic::DkgConfirmation { attempt, round: _ } => assert_eq!( - topic.dkg_confirmation_sign_id(set), - Some({ - let id = { - let mut id = [0; 32]; - let encoded_set = borsh::to_vec(&set).unwrap(); - id[.. encoded_set.len()].copy_from_slice(&encoded_set); - VariantSignId::Batch(id) - }; - SignId { session: set.session, id, attempt } - }) - ), - _ => assert_eq!(topic.dkg_confirmation_sign_id(set), None), + Topic::DkgConfirmation { attempt, round: _ } => { + assert_eq!( + topic.dkg_confirmation_sign_id(set), + Some({ + let id = { + let mut id = [0; 32]; + let encoded_set = borsh::to_vec(&set).unwrap(); + id[.. encoded_set.len()].copy_from_slice(&encoded_set); + VariantSignId::Batch(id) + }; + SignId { session: set.session, id, attempt } + }) + ); + } + Topic::RemoveParticipant { .. } | Topic::SlashReport | Topic::Sign { .. } => { + assert_eq!(topic.dkg_confirmation_sign_id(set), None); + } } } } @@ -264,7 +275,12 @@ mod topic { topic.preceding_topic(), Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) ), - _ => assert_eq!(topic.preceding_topic(), None), + Topic::RemoveParticipant { .. } | + Topic::DkgConfirmation { round: SigningProtocolRound::Preprocess, .. } | + Topic::SlashReport | + Topic::Sign { round: SigningProtocolRound::Preprocess, .. } => { + assert_eq!(topic.preceding_topic(), None); + } } // preceding and succeeding should be inverses @@ -289,7 +305,12 @@ mod topic { topic.succeeding_topic(), Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) ), - _ => assert_eq!(topic.succeeding_topic(), None), + Topic::RemoveParticipant { .. } | + Topic::DkgConfirmation { round: SigningProtocolRound::Share, .. } | + Topic::SlashReport | + Topic::Sign { round: SigningProtocolRound::Share, .. } => { + assert_eq!(topic.succeeding_topic(), None); + } } } } @@ -299,10 +320,12 @@ mod topic { for topic in all_topics_and_attempts() { match topic { Topic::DkgConfirmation { attempt, .. } => { - assert_eq!(topic.requires_recognition(), attempt != 0) + assert_eq!(topic.requires_recognition(), attempt != 0); + } + Topic::Sign { .. } => assert!(topic.requires_recognition()), + Topic::RemoveParticipant { .. } | Topic::SlashReport => { + assert!(!topic.requires_recognition()); } - Topic::Sign { .. } => assert_eq!(topic.requires_recognition(), true), - _ => assert_eq!(topic.requires_recognition(), false), } } } @@ -312,10 +335,10 @@ mod topic { for topic in all_topics_and_attempts() { match topic { Topic::RemoveParticipant { .. } | Topic::SlashReport => { - assert_eq!(topic.participating(), Participating::Everyone) + assert_eq!(topic.participating(), Participating::Everyone); } Topic::DkgConfirmation { .. } | Topic::Sign { .. } => { - assert_eq!(topic.participating(), Participating::Participated) + assert_eq!(topic.participating(), Participating::Participated); } } } @@ -346,7 +369,7 @@ mod tributary_db { // Same set cannot recognize again until finished { let mut txn = db.txn(); - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash1)); + assert_eq!(ActivelyCosigning::get(&txn, set), Some(block_hash1)); let retry = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let block_hash2 = random_block_hash(&mut OsRng); @@ -366,7 +389,7 @@ mod tributary_db { { let mut txn = db.txn(); TributaryDb::finish_cosigning(&mut txn, set); - assert_eq!(ActivelyCosigning::get(&mut txn, set), None); + assert_eq!(ActivelyCosigning::get(&txn, set), None); // Previous topic remains recognized assert!(TributaryDb::recognized(&txn, set, expected_topic)); @@ -381,10 +404,10 @@ mod tributary_db { let block_number2 = random_block_number(&mut OsRng); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); - assert_eq!(ActivelyCosigning::get(&mut txn, set), Some(block_hash2)); + assert_eq!(ActivelyCosigning::get(&txn, set), Some(block_hash2)); TributaryDb::finish_cosigning(&mut txn, set); - assert_eq!(ActivelyCosigning::get(&mut txn, set), None); + assert_eq!(ActivelyCosigning::get(&txn, set), None); // The new topic is now recognized assert!(TributaryDb::recognized( @@ -415,7 +438,7 @@ mod tributary_db { all_topics_and_attempts().len() ); - for _iteration in 0 .. 100 { + for iteration in 0 .. 100 { for topic in all_topics_and_attempts() { // Fresh DB per topic so recognized state doesn't leak between iterations let mut db = MemDb::new(); @@ -427,7 +450,7 @@ mod tributary_db { reattemptable_topics.iter().copied().filter(|_| OsRng.next_u64() % 2 == 0).collect(); serai_env::trace!( - "iteration={_iteration}, topic={topic:?}, block_number={block_number}, \ + "iteration={iteration}, topic={topic:?}, block_number={block_number}, \ reattempts={reattempts:?}" ); @@ -453,7 +476,7 @@ mod tributary_db { // When no reattempts were set, verify the current topic's reattempt was not recognized if reattempts.is_empty() { if let Some((_, reattempt_topic)) = topic.reattempt_topic() { - assert_eq!(TributaryDb::recognized(&txn, set, reattempt_topic), false); + assert!(!TributaryDb::recognized(&txn, set, reattempt_topic)); serai_env::trace!("verified {reattempt_topic:?} not recognized (no reattempts)"); } } @@ -575,7 +598,7 @@ mod tributary_db { ); txn.commit(); - assert_eq!(TributaryDb::is_fatally_slashed(&db, set, validator), false); + assert!(!TributaryDb::is_fatally_slashed(&db, set, validator)); // Below threshold (1 of 3) so result is None but data is stored assert!(matches!(result, DataSet::None)); @@ -629,9 +652,8 @@ mod tributary_db { ); txn.commit(); - assert_eq!( - TributaryDb::is_fatally_slashed(&db, set, validator), - false, + assert!( + !TributaryDb::is_fatally_slashed(&db, set, validator), "preceding key exists (same type) so validator should not be slashed" ); assert!(matches!(result, DataSet::None), "below threshold (1 of 3)"); @@ -667,7 +689,7 @@ mod tributary_db { total_weight, block_number, topic, - |i| [i as u8; 32], + |i| [u8::try_from(i).unwrap(); 32], Some(|i: usize, result: &DataSet| { if i < 2 { assert!(matches!(result, DataSet::None)); @@ -789,7 +811,7 @@ mod tributary_db { total_weight, block_number, topic, - |i| [i as u8; 32], + |i| [u8::try_from(i).unwrap(); 32], None::, ); assert!(matches!(result, DataSet::Participating(_))); @@ -800,7 +822,7 @@ mod tributary_db { for (i, v) in validators.iter().enumerate() { assert_eq!( Accumulated::::get(&db, set, topic, *v), - Some([i as u8; 32]), + Some([u8::try_from(i).unwrap(); 32]), "data should be preserved when reattempt exists: {topic:?}" ); } @@ -846,7 +868,7 @@ mod tributary_db { total_weight, block_number, topic, - |i| [i as u8; 32], + |i| [u8::try_from(i).unwrap(); 32], None::, ); })); @@ -875,7 +897,7 @@ mod tributary_db { total_weight, random_block_number(&mut OsRng), topic, - |i| [i as u8; 32], + |i| [u8::try_from(i).unwrap(); 32], None::, ); txn.commit(); @@ -955,7 +977,7 @@ mod tributary_db { total_weight, block_number, topic, - |i| vec![i as u8], + |i| vec![u8::try_from(i).unwrap()], None::>)>, ); @@ -993,7 +1015,7 @@ mod tributary_db { total_weight, block_number, topic, - |i| vec![i as u8], + |i| vec![u8::try_from(i).unwrap()], None::>)>, ); @@ -1029,7 +1051,7 @@ mod tributary_db { /// /// Independently computes the expected DB state by tracing the code paths in `accumulate` /// based on the inputs and pre-state, then asserts the actual DB matches. - #[expect(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments, clippy::fn_params_excessive_bools)] fn verify_accumulate_invariants( db: &MemDb, set: ExternalValidatorSet, @@ -1263,7 +1285,7 @@ mod tributary_db { // When validator_in_list is false, the accumulating validator is an outsider // not present in the validators slice. This exercises the `participated = false` // branch when the threshold is crossed. - let cur_validator = (cur_validator as usize) % validators.len(); + let cur_validator = usize::from(cur_validator) % validators.len(); let validator = if validator_in_list { validators[cur_validator] } else { @@ -1272,7 +1294,7 @@ mod tributary_db { if has_preceding_topic_accumulated { if let Some(preceding_topic) = topic.preceding_topic() { - Accumulated::set(&mut txn, set, preceding_topic, validator, &data) + Accumulated::set(&mut txn, set, preceding_topic, validator, &data); } } @@ -1315,7 +1337,7 @@ mod tributary_db { if let Err(panic) = catch_result { let msg = panic .downcast_ref::() - .map(|s| s.as_str()) + .map(String::as_str) .or_else(|| panic.downcast_ref::<&str>().copied()) .unwrap_or(""); if msg.contains("overflowed") { diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 5c94bf85a..2ebcf9355 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use ciphersuite::group::GroupEncoding; +use ciphersuite::group::GroupEncoding as _; use ciphersuite::WrappedGroup; use dalek_ff_group::{Ristretto, RistrettoPoint}; use messages::sign::VariantSignId; @@ -47,14 +47,14 @@ pub(crate) fn random_key( Zeroizing::new(::F::random(&mut *rng)) } -pub(crate) fn get_key_point(key: Zeroizing<::F>) -> RistrettoPoint { - Ristretto::generator() * *key +pub(crate) fn get_key_point(key: &Zeroizing<::F>) -> RistrettoPoint { + Ristretto::generator() * **key } pub(crate) fn random_serai_address_and_key( rng: &mut R, ) -> (RistrettoPoint, SeraiAddress) { - let key = get_key_point(random_key(rng)); + let key = get_key_point(&random_key(rng)); (key, SeraiAddress(key.to_bytes())) } @@ -64,8 +64,9 @@ pub(crate) fn random_signed(rng: &mut R) -> Signed { } /// One of each signed transaction kind, and attempts: at 0, a random attempt, and u32::MAX. -pub(crate) fn all_signed_transactions_and_attempts(signed: Signed) -> Vec { +pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec { let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); + let signed = *signed; vec![ // RemoveParticipant Transaction::RemoveParticipant { participant: random_serai_address(&mut OsRng), signed }, @@ -160,7 +161,7 @@ pub(crate) fn all_provided_transactions() -> Vec { /// One of each of all transaction kinds. pub(crate) fn all_transactions() -> Vec { - let mut txs = all_signed_transactions_and_attempts(random_signed(&mut OsRng)); + let mut txs = all_signed_transactions_and_attempts(&random_signed(&mut OsRng)); txs.extend(all_provided_transactions()); txs } @@ -259,7 +260,13 @@ pub(crate) fn assert_block_side_effects( "SlashReport topic should be recognized", ); } - _ => {} + Transaction::RemoveParticipant { .. } | + Transaction::DkgConfirmationPreprocess { .. } | + Transaction::DkgConfirmationShare { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } | + Transaction::Sign { .. } => {} }, tributary_sdk::Transaction::Tendermint(_) => {} } @@ -294,16 +301,16 @@ pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInf } } -/// Generate `n` random validators (weight 1 each) with keys, returning all derived collections. -pub(crate) fn setup_n_validators_with_keys( - n: u16, -) -> ( +pub(crate) type ValidatorSetup = ( Vec<(RistrettoPoint, SeraiAddress)>, Vec<(SeraiAddress, u16)>, Vec, HashMap, u16, -) { +); + +/// Generate `n` random validators (weight 1 each) with keys, returning all derived collections. +pub(crate) fn setup_n_validators_with_keys(n: u16) -> ValidatorSetup { let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = (0 .. n).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); let validator_data: Vec<(SeraiAddress, u16)> = @@ -316,12 +323,6 @@ pub(crate) fn setup_n_validators_with_keys( } /// Common test setup with 3 random validators each with weight 1, total_weight = 3. -pub(crate) fn setup_test_validators_and_weights_with_keys() -> ( - Vec<(RistrettoPoint, SeraiAddress)>, - Vec<(SeraiAddress, u16)>, - Vec, - HashMap, - u16, -) { +pub(crate) fn setup_test_validators_and_weights_with_keys() -> ValidatorSetup { setup_n_validators_with_keys(3) } diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index 9a81b474f..7f3d8b860 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -2,7 +2,7 @@ use core::marker::PhantomData; use schnorr::SchnorrSignature; -use serai_db::{Db, DbTxn, MemDb}; +use serai_db::{Db as _, DbTxn, MemDb}; use serai_primitives::test_helpers::{random_block_hash, random_block_number, random_vec_of_len}; use serai_cosign_types::CosignIntent; use tributary_sdk::{ @@ -276,7 +276,7 @@ mod handle_application_tx { txn.commit(); } - for tx in all_signed_transactions_and_attempts(Signed::default()) { + for tx in all_signed_transactions_and_attempts(&Signed::default()) { let mut txn = db.txn(); { @@ -313,7 +313,7 @@ mod handle_application_tx { Transaction::RemoveParticipant { participant: nonexistent, signed: Signed::default() }, ); - assert!(TributaryDb::is_fatally_slashed(&mut txn, set, default_signer)); + assert!(TributaryDb::is_fatally_slashed(&txn, set, default_signer)); } // Valid RemoveParticipant accumulates weight and eventually crosses threshold @@ -340,14 +340,14 @@ mod handle_application_tx { } assert!( RecognizedTopics::recognized( - &mut txn, + &txn, set, Topic::RemoveParticipant { participant: target } ), "RemoveParticipant topic should be recognized after handling the tx" ); assert!( - !TributaryDb::is_fatally_slashed(&mut txn, set, target), + !TributaryDb::is_fatally_slashed(&txn, set, target), "target should not be fatally slashed after one vote" ); @@ -364,7 +364,7 @@ mod handle_application_tx { ); } assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, target), + TributaryDb::is_fatally_slashed(&txn, set, target), "target should be fatally slashed after threshold is crossed" ); } @@ -465,7 +465,7 @@ mod handle_application_tx { ); assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, addr0), + TributaryDb::is_fatally_slashed(&txn, set, addr0), "share without preceding preprocess should fatally slash" ); } @@ -565,7 +565,7 @@ mod handle_application_tx { Transaction::Cosign { substrate_block_hash: block_hash }, ); - assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&mut txn, set), Some(block_hash)); + assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&txn, set), Some(block_hash)); assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(block_hash)); assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } @@ -590,7 +590,7 @@ mod handle_application_tx { Transaction::Cosign { substrate_block_hash: second_hash }, ); - assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&mut txn, set), Some(second_hash)); + assert_eq!(TributaryDb::latest_substrate_block_to_cosign(&txn, set), Some(second_hash)); assert_eq!(TributaryDb::actively_cosigning(&mut txn, set), Some(first_hash)); } } @@ -699,7 +699,7 @@ mod handle_application_tx { for plan in &plans { let topic = expected_initially_recognized_sign_topic(VariantSignId::Transaction(*plan)); - assert!(RecognizedTopics::recognized(&mut txn, set, topic)); + assert!(RecognizedTopics::recognized(&txn, set, topic)); } } @@ -723,7 +723,7 @@ mod handle_application_tx { } let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(batch_hash)); - assert!(RecognizedTopics::recognized(&mut txn, set, topic)); + assert!(RecognizedTopics::recognized(&txn, set, topic)); } mod slash_report { @@ -754,14 +754,14 @@ mod handle_application_tx { scan_block.handle_application_tx( random_block_number(&mut OsRng), Transaction::SlashReport { - slash_points: vec![0; wrong_len as usize], + slash_points: vec![0; usize::from(wrong_len)], signed: new_signed(signer_key), }, ); } assert!( - TributaryDb::is_fatally_slashed(&mut txn, set, signer_addr), + TributaryDb::is_fatally_slashed(&txn, set, signer_addr), "signer should be fatally slashed for wrong-length slash report", ); assert!( @@ -773,14 +773,14 @@ mod handle_application_tx { #[test] fn fatal_slash_as_reported_median() { let num_validators = OsRng.gen_range(4u16 .. 10); - let num_reports = required_participation(num_validators) as usize; + let num_reports = usize::from(required_participation(num_validators)); let set = default_test_validator_set(); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); - let mut report = vec![0u32; num_validators as usize]; + let mut report = vec![0u32; usize::from(num_validators)]; report[0] = u32::MAX; let reports: Vec> = vec![report; num_reports]; @@ -818,7 +818,7 @@ mod handle_application_tx { } // Compute the median for each validator position across all reporters - let mut medians = Vec::with_capacity(num_validators as usize); + let mut medians = Vec::with_capacity(usize::from(num_validators)); for i in 0 .. usize::from(num_validators) { let mut values: Vec = reports.iter().map(|r| r[i]).collect(); values.sort_unstable(); @@ -853,7 +853,7 @@ mod handle_application_tx { for _ in 0 .. 200 { // random even: 4, 6, 8, or 10 let n = OsRng.gen_range(2u16 ..= 5) * 2; - let num_reports = required_participation(n as u16); + let num_reports = required_participation(n); let set = default_test_validator_set(); @@ -896,7 +896,7 @@ mod handle_application_tx { let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); assert!( - RecognizedTopics::recognized(&mut txn, set, sign_topic), + RecognizedTopics::recognized(&txn, set, sign_topic), "SlashReport sign topic should be recognized", ); } @@ -945,7 +945,7 @@ mod handle_application_tx { assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); - assert!(RecognizedTopics::recognized(&mut txn, set, sign_topic)); + assert!(RecognizedTopics::recognized(&txn, set, sign_topic)); } } } @@ -981,7 +981,7 @@ mod handle_application_tx { }, ); - assert!(TributaryDb::is_fatally_slashed(&mut txn, set, addr0)); + assert!(TributaryDb::is_fatally_slashed(&txn, set, addr0)); } // Valid data: threshold crossing sends ProcessorMessage @@ -1055,7 +1055,7 @@ mod handle_application_tx { assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); // Share topic should now be recognized - assert!(RecognizedTopics::recognized(&mut txn, set, share_topic)); + assert!(RecognizedTopics::recognized(&txn, set, share_topic)); // Step 2: All validators submit shares, crossing threshold -> sends Shares message. { @@ -1081,7 +1081,7 @@ mod handle_application_tx { // No validators should be slashed for v in &validators { - assert!(!TributaryDb::is_fatally_slashed(&mut txn, set, *v)); + assert!(!TributaryDb::is_fatally_slashed(&txn, set, *v)); } } } @@ -1117,7 +1117,7 @@ fn handle_block() { // Each application transaction type passes through handle_block. // Signed transactions use a real validator key so participant_indexes lookups succeed. // Cosign and SubstrateBlock need external state populated before they can run. - for tx in all_signed_transactions_and_attempts(signed) { + for tx in all_signed_transactions_and_attempts(&signed) { let mut db = MemDb::new(); let mut txn = db.txn(); @@ -1160,7 +1160,14 @@ fn handle_block() { let plans = vec![random_bytes_32(&mut OsRng)]; SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); } - _ => {} + Transaction::RemoveParticipant { .. } | + Transaction::DkgParticipation { .. } | + Transaction::DkgConfirmationPreprocess { .. } | + Transaction::DkgConfirmationShare { .. } | + Transaction::Cosigned { .. } | + Transaction::Batch { .. } | + Transaction::Sign { .. } | + Transaction::SlashReport { .. } => {} } let block_txs = vec![TributaryTransaction::Application(tx)]; diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs index 0e632f1cb..bb606071e 100644 --- a/coordinator/tributary/src/tests/scan_tributary.rs +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -14,7 +14,7 @@ async fn make_tributary( db: MemDb, ) -> (Tributary, Zeroizing<::F>, [u8; 32]) { let key = random_key(&mut OsRng); - let pub_key = get_key_point(key.clone()); + let pub_key = get_key_point(&key); let genesis = random_genesis(&mut OsRng); let tributary = Tributary::::new( db, @@ -44,7 +44,7 @@ async fn new_scan_tributary_task() { assert_eq!(task.validators.len(), 1); assert_eq!(task.validators[0], addr); assert_eq!(task.total_weight, 3); - assert_eq!(*task.validator_weights.get(&addr).unwrap(), 3); + assert_eq!(task.validator_weights[&addr], 3); } // Multiple validators with different weights @@ -57,9 +57,9 @@ async fn new_scan_tributary_task() { assert_eq!(task.validators.len(), 3); assert_eq!(task.total_weight, 7); - assert_eq!(*task.validator_weights.get(&addr1).unwrap(), 1); - assert_eq!(*task.validator_weights.get(&addr2).unwrap(), 2); - assert_eq!(*task.validator_weights.get(&addr3).unwrap(), 4); + assert_eq!(task.validator_weights[&addr1], 1); + assert_eq!(task.validator_weights[&addr2], 2); + assert_eq!(task.validator_weights[&addr3], 4); } // Preserves set info @@ -84,9 +84,10 @@ async fn wait_for_block_after( if let Some(hash) = reader.block_after(parent) { return hash; } - if start.elapsed() > std::time::Duration::from_secs(30) { - panic!("timed out waiting for a block after {:?}", parent); - } + assert!( + start.elapsed() <= std::time::Duration::from_secs(30), + "timed out waiting for a block after {parent:?}" + ); tokio::time::sleep(std::time::Duration::from_millis(20)).await; } } @@ -99,7 +100,7 @@ fn inject_block( parent: [u8; 32], transactions: Vec>, ) -> [u8; 32] { - let tx_hashes: Vec<[u8; 32]> = transactions.iter().map(|tx| tx.hash()).collect(); + let tx_hashes: Vec<[u8; 32]> = transactions.iter().map(tributary_sdk::Transaction::hash).collect(); let txs_hash = Blake2s256::digest(tx_hashes.iter().flat_map(|h| h.iter().copied()).collect::>()).into(); let block = Block { header: BlockHeader { parent, transactions: txs_hash }, transactions }; @@ -197,9 +198,10 @@ async fn scan_tributary_task_run_iteration() { let mut parent = genesis2; let start = std::time::Instant::now(); loop { - if start.elapsed() > std::time::Duration::from_secs(30) { - panic!("timed out waiting for a block with the provided tx"); - } + assert!( + start.elapsed() <= std::time::Duration::from_secs(30), + "timed out waiting for a block with the provided tx" + ); if let Some(hash) = reader.block_after(&parent) { let block = reader.block(&hash).unwrap(); if block diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 3956fd068..e90302314 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -2,11 +2,11 @@ use core::ops::Deref as _; use std::io::{self, Cursor, Read, Write}; use blake2::{digest::typenum::U32, Digest as _, Blake2b}; -use borsh::{BorshDeserialize, BorshSerialize}; -use rand::{RngCore, rngs::OsRng}; +use borsh::{BorshDeserialize as _, BorshSerialize as _}; +use rand::{RngCore as _, rngs::OsRng}; use ciphersuite::{ - group::{Group as _, GroupEncoding, ff::PrimeField}, + group::{Group as _, GroupEncoding as _, ff::PrimeField as _}, *, }; use dalek_ff_group::Ristretto; @@ -15,7 +15,7 @@ use messages::sign::VariantSignId; use serai_primitives::{test_helpers::*, validator_sets::KeyShares}; use tributary_sdk::{ ReadWrite, - transaction::{Transaction as TransactionTrait, TransactionError, TransactionKind}, + transaction::{Transaction as _, TransactionError, TransactionKind}, }; use super::*; @@ -84,7 +84,7 @@ mod signed { struct FailingWriter; impl Write for FailingWriter { fn write(&mut self, _buf: &[u8]) -> io::Result { - Err(io::Error::new(io::ErrorKind::Other, "simulated write failure")) + Err(io::Error::other("simulated write failure")) } fn flush(&mut self) -> io::Result<()> { Ok(()) @@ -141,7 +141,7 @@ mod signed { fn to_tributary_signed_matches_signed() { let signed = random_signed(&mut OsRng); for round in all_signing_protocol_rounds() { - let tributary_signed = signed.clone().to_tributary_signed(round); + let tributary_signed = signed.to_tributary_signed(round); assert_eq!(signed.signer(), tributary_signed.signer); assert_eq!(signed.signature, tributary_signed.signature); assert_eq!(tributary_signed.nonce, round.nonce()); @@ -158,6 +158,7 @@ mod signed { } } +#[allow(clippy::module_inception)] mod transaction { use super::*; @@ -178,7 +179,7 @@ mod transaction { } Transaction::DkgParticipation { participation, signed } => { let mut expected = vec![1u8]; - expected.extend(&(participation.len() as u32).to_le_bytes()); + expected.extend(&u32::try_from(participation.len()).unwrap().to_le_bytes()); expected.extend(participation); expected.extend(borsh::to_vec(signed).unwrap()); expected @@ -242,9 +243,9 @@ mod transaction { SigningProtocolRound::Preprocess => expected.push(0u8), SigningProtocolRound::Share => expected.push(1u8), } - expected.extend(&(data.len() as u32).to_le_bytes()); + expected.extend(&u32::try_from(data.len()).unwrap().to_le_bytes()); for d in data { - expected.extend(&(d.len() as u32).to_le_bytes()); + expected.extend(&u32::try_from(d.len()).unwrap().to_le_bytes()); expected.extend(d); } expected.extend(borsh::to_vec(signed).unwrap()); @@ -252,7 +253,7 @@ mod transaction { } Transaction::SlashReport { slash_points, signed } => { let mut expected = vec![9u8]; - expected.extend(&(slash_points.len() as u32).to_le_bytes()); + expected.extend(&u32::try_from(slash_points.len()).unwrap().to_le_bytes()); for &p in slash_points { expected.extend(&p.to_le_bytes()); } @@ -342,12 +343,12 @@ mod transaction { /// Borsh-encodes a byte-string label: `len(4 LE) || label` fn borsh_label(label: &[u8]) -> Vec { let mut out = Vec::new(); - out.extend(&(label.len() as u32).to_le_bytes()); + out.extend(&u32::try_from(label.len()).unwrap().to_le_bytes()); out.extend(label); out } - for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { + for mut tx in all_signed_transactions_and_attempts(&random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); let (expected_order, expected_nonce) = match &tx { @@ -391,7 +392,12 @@ mod transaction { (order, nonce) } Transaction::SlashReport { .. } => (borsh_label(b"SlashReport"), 0), - other => panic!("all_signed_transactions_and_attempts returned non-signed tx: {other:?}"), + other @ (Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. }) => { + panic!("all_signed_transactions_and_attempts returned non-signed tx: {other:?}") + } }; match tx.kind() { @@ -403,7 +409,9 @@ mod transaction { "Signature verification failed for {tx:?}" ); } - other => panic!("Expected Signed kind, got {other:?} for {tx:?}"), + other @ (TransactionKind::Provided(_) | TransactionKind::Unsigned) => { + panic!("Expected Signed kind, got {other:?} for {tx:?}") + } } } } @@ -416,14 +424,23 @@ mod transaction { Transaction::Cosigned { .. } => "Cosigned", Transaction::SubstrateBlock { .. } => "SubstrateBlock", Transaction::Batch { .. } => "Batch", - other => panic!("all_provided_transactions returned non-provided tx: {other:?}"), + other @ (Transaction::RemoveParticipant { .. } | + Transaction::DkgParticipation { .. } | + Transaction::DkgConfirmationPreprocess { .. } | + Transaction::DkgConfirmationShare { .. } | + Transaction::Sign { .. } | + Transaction::SlashReport { .. }) => { + panic!("all_provided_transactions returned non-provided tx: {other:?}") + } }; match tx.kind() { TransactionKind::Provided(actual_order) => { assert_eq!(actual_order, expected_order, "Wrong order for {tx:?}"); } - other => panic!("Expected Provided kind, got {other:?} for {tx:?}"), + other @ (TransactionKind::Unsigned | TransactionKind::Signed(..)) => { + panic!("Expected Provided kind, got {other:?} for {tx:?}") + } } } } @@ -515,8 +532,7 @@ mod transaction { Transaction::RemoveParticipant { .. } | Transaction::DkgParticipation { .. } | Transaction::DkgConfirmationPreprocess { .. } | - Transaction::DkgConfirmationShare { .. } => {} - + Transaction::DkgConfirmationShare { .. } | // Provided: no validation beyond structure Transaction::Cosign { .. } | Transaction::Cosigned { .. } | @@ -532,8 +548,9 @@ mod transaction { data, signed: *signed, }; - assert_eq!(with_data(vec![vec![]; 0]).verify(), Ok(())); - assert_eq!(with_data(vec![vec![]; OsRng.next_u32() as usize % max]).verify(), Ok(())); + assert_eq!(with_data(Vec::new()).verify(), Ok(())); + let random_len = usize::try_from(OsRng.next_u32()).unwrap() % max; + assert_eq!(with_data(vec![vec![]; random_len]).verify(), Ok(())); assert_eq!(with_data(vec![vec![]; max]).verify(), Ok(())); assert_eq!( with_data(vec![vec![]; max + 1]).verify(), @@ -545,8 +562,9 @@ mod transaction { Transaction::SlashReport { signed, .. } => { let with_points = |points| Transaction::SlashReport { slash_points: points, signed: *signed }; - assert_eq!(with_points(vec![0; 0]).verify(), Ok(())); - assert_eq!(with_points(vec![0; OsRng.next_u32() as usize % max]).verify(), Ok(())); + assert_eq!(with_points(vec![]).verify(), Ok(())); + let random_len = usize::try_from(OsRng.next_u32()).unwrap() % max; + assert_eq!(with_points(vec![0; random_len]).verify(), Ok(())); assert_eq!(with_points(vec![0; max]).verify(), Ok(())); assert_eq!(with_points(vec![0; max + 1]).verify(), Err(TransactionError::InvalidContent)); } @@ -561,7 +579,11 @@ mod transaction { Transaction::RemoveParticipant { participant, .. } => { Some(Topic::RemoveParticipant { participant: *participant }) } - Transaction::DkgParticipation { .. } => None, + Transaction::DkgParticipation { .. } | + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => None, Transaction::DkgConfirmationPreprocess { attempt, .. } => Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Preprocess, @@ -573,7 +595,6 @@ mod transaction { Some(Topic::Sign { id: *id, attempt: *attempt, round: *round }) } Transaction::SlashReport { .. } => Some(Topic::SlashReport), - _ => None, }; assert_eq!(tx.topic(), expected, "Wrong topic for {tx:?}"); } @@ -589,7 +610,7 @@ mod transaction { let genesis = random_genesis(&mut OsRng); // Sets correct signer and produces verifiable signature - for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { + for mut tx in all_signed_transactions_and_attempts(&random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); let sig_hash = tx.sig_hash(genesis); @@ -618,9 +639,8 @@ mod transaction { } let wrong_challenge = tx.sig_hash(wrong_genesis); if let TransactionKind::Signed(_, tributary_signed) = tx.kind() { - assert_eq!( - tributary_signed.signature.verify(tributary_signed.signer, wrong_challenge), - false, + assert!( + !tributary_signed.signature.verify(tributary_signed.signer, wrong_challenge), "Signature should not verify with wrong genesis" ); } diff --git a/coordinator/tributary/src/tests/tributary.rs b/coordinator/tributary/src/tests/tributary.rs index c2ee26bcf..567ee6dbe 100644 --- a/coordinator/tributary/src/tests/tributary.rs +++ b/coordinator/tributary/src/tests/tributary.rs @@ -1,4 +1,4 @@ -use serai_db::{Db, DbTxn, MemDb}; +use serai_db::{Db as _, DbTxn as _, MemDb}; use crate::*; use super::*; @@ -6,7 +6,15 @@ use super::*; fn unwrap_slash_report(tx: Transaction) -> (Vec, Signed) { match tx { Transaction::SlashReport { slash_points, signed } => (slash_points, signed), - other => panic!("expected SlashReport, got {other:?}"), + other @ (Transaction::RemoveParticipant { .. } | + Transaction::DkgParticipation { .. } | + Transaction::DkgConfirmationPreprocess { .. } | + Transaction::DkgConfirmationShare { .. } | + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } | + Transaction::Sign { .. }) => panic!("expected SlashReport, got {other:?}"), } } From 32fbb594100951f9b701ab6e7c1cb153dd66ba72 Mon Sep 17 00:00:00 2001 From: rafael_xmr Date: Wed, 15 Apr 2026 15:13:55 -0400 Subject: [PATCH 59/71] chore(coordinator/tributary): fmt errors --- coordinator/tributary/src/db.rs | 4 +--- coordinator/tributary/src/tests/scan_block.rs | 6 +----- coordinator/tributary/src/tests/scan_tributary.rs | 3 ++- coordinator/tributary/src/tests/transaction.rs | 3 +-- 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 45c17de1f..1f56f29ad 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -492,9 +492,7 @@ impl TributaryDb { // Accumulate the data accumulated_weight = accumulated_weight.checked_add(validator_weight).unwrap_or_else(|| { - panic!( - "accumulated_weight {accumulated_weight} overflowed adding validator_weight {validator_weight}", - ) + panic!("accumulated {accumulated_weight} overflowed adding validator's {validator_weight}") }); AccumulatedWeight::set(txn, set, topic, &accumulated_weight); Accumulated::set(txn, set, topic, validator, data); diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index 7f3d8b860..e5dc88da8 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -339,11 +339,7 @@ mod handle_application_tx { ); } assert!( - RecognizedTopics::recognized( - &txn, - set, - Topic::RemoveParticipant { participant: target } - ), + RecognizedTopics::recognized(&txn, set, Topic::RemoveParticipant { participant: target }), "RemoveParticipant topic should be recognized after handling the tx" ); assert!( diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs index bb606071e..03f220dfc 100644 --- a/coordinator/tributary/src/tests/scan_tributary.rs +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -100,7 +100,8 @@ fn inject_block( parent: [u8; 32], transactions: Vec>, ) -> [u8; 32] { - let tx_hashes: Vec<[u8; 32]> = transactions.iter().map(tributary_sdk::Transaction::hash).collect(); + let tx_hashes: Vec<[u8; 32]> = + transactions.iter().map(tributary_sdk::Transaction::hash).collect(); let txs_hash = Blake2s256::digest(tx_hashes.iter().flat_map(|h| h.iter().copied()).collect::>()).into(); let block = Block { header: BlockHeader { parent, transactions: txs_hash }, transactions }; diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index e90302314..9180f9840 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -528,12 +528,11 @@ mod transaction { // Test boundary conditions per variant match &tx { - // Fixed-length: no validation beyond structure + // No additional validation beyond structure Transaction::RemoveParticipant { .. } | Transaction::DkgParticipation { .. } | Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } | - // Provided: no validation beyond structure Transaction::Cosign { .. } | Transaction::Cosigned { .. } | Transaction::SubstrateBlock { .. } | From bd7de96e166dad3f636e646bc10cac5128df0153 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 Apr 2026 16:16:00 -0400 Subject: [PATCH 60/71] Checkout the `Cargo.lock` from the latest commit from `next` this descends from This reverts a myriad of `cargo update`s. --- Cargo.lock | 533 +++++++++++------------------------------------------ 1 file changed, 110 insertions(+), 423 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca605c126..107ed1689 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -86,9 +86,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.34" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e0378e959aa6a885897522080a990e80eb317f1e9a222a604492ea50e13096" +checksum = "f4e9e31d834fe25fe991b8884e4b9f0e59db4a97d86e05d1464d6899c013cd62" dependencies = [ "alloy-primitives", "num_enum", @@ -110,7 +110,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.1.1", + "derive_more 2.0.1", "either", "k256", "once_cell", @@ -203,7 +203,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.1.1", + "derive_more 2.0.1", "either", "serde", "serde_with", @@ -282,7 +282,7 @@ dependencies = [ "alloy-sol-types", "async-trait", "auto_impl", - "derive_more 2.1.1", + "derive_more 2.0.1", "futures-utils-wasm", "serde", "serde_json", @@ -334,7 +334,7 @@ dependencies = [ "bytes", "cfg-if", "const-hex", - "derive_more 2.1.1", + "derive_more 2.0.1", "foldhash 0.2.0", "hashbrown 0.16.1", "indexmap 2.14.0", @@ -343,7 +343,7 @@ dependencies = [ "keccak-asm", "paste", "proptest", - "rand 0.9.4", + "rand 0.9.2", "rapidhash", "ruint", "rustc-hash", @@ -449,7 +449,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2145138f3214928f08cd13da3cb51ef7482b5920d8ac5a02ecd4e38d1a8f6d1e" dependencies = [ "alloy-primitives", - "derive_more 2.1.1", + "derive_more 2.0.1", "serde", "serde_with", ] @@ -621,7 +621,7 @@ dependencies = [ "alloy-json-rpc", "auto_impl", "base64", - "derive_more 2.1.1", + "derive_more 2.0.1", "futures", "futures-utils-wasm", "parking_lot", @@ -643,7 +643,7 @@ checksum = "3f14b5d9b2c2173980202c6ff470d96e7c5e202c65a9f67884ad565226df7fbb" dependencies = [ "alloy-primitives", "alloy-rlp", - "derive_more 2.1.1", + "derive_more 2.0.1", "nybbles", "serde", "smallvec", @@ -1027,16 +1027,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd307490d624467aa6f74b0eabb77633d1f758a7b25f12bceb0b22e08d9726f6" -[[package]] -name = "base256emoji" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" -dependencies = [ - "const-str", - "match-lookup", -] - [[package]] name = "base58ck" version = "0.1.0" @@ -1071,7 +1061,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1186,9 +1176,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "bitvec" @@ -1625,12 +1615,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "const-str" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" - [[package]] name = "const_format" version = "0.2.35" @@ -1651,15 +1635,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "convert_case" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -2175,11 +2150,11 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "derive_more-impl 2.1.1", + "derive_more-impl 2.0.1", ] [[package]] @@ -2195,14 +2170,12 @@ dependencies = [ [[package]] name = "derive_more-impl" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", "syn 2.0.117", "unicode-xid", ] @@ -2649,14 +2622,12 @@ dependencies = [ [[package]] name = "expander" -version = "2.2.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2c470c71d91ecbd179935b24170459e926382eaaa86b590b78814e180d8a8e2" +checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" dependencies = [ "blake2 0.10.6", - "file-guard", "fs-err", - "prettyplease", "proc-macro2", "quote", "syn 2.0.117", @@ -2705,16 +2676,6 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" -[[package]] -name = "file-guard" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ef72acf95ec3d7dbf61275be556299490a245f017cf084bd23b4f68cf9407c" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "finality-grandpa" version = "0.16.3" @@ -3217,9 +3178,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.33.0" +version = "0.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7f043f89559805f8c7cacc432749b2fa0d0a0a9ee46ce47164ed5ba7f126c" +checksum = "19e16c5073773ccf057c282be832a59ee53ef5ff98db3aeff7f8314f52ffc196" dependencies = [ "fnv", "hashbrown 0.16.1", @@ -3250,7 +3211,7 @@ dependencies = [ "parking_lot", "portable-atomic", "quanta", - "rand 0.9.4", + "rand 0.9.2", "smallvec", "spinning_top", "web-time", @@ -3419,7 +3380,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.4", + "rand 0.9.2", "ring", "socket2 0.5.10", "thiserror 2.0.18", @@ -3441,7 +3402,7 @@ dependencies = [ "lru-slab", "once_cell", "parking_lot", - "rand 0.9.4", + "rand 0.9.2", "resolv-conf", "smallvec", "thiserror 2.0.18", @@ -3570,15 +3531,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.9" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ca68d021ef39cf6463ab54c1d0f5daf03377b70561305bb89a8f83aab66e0f" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http", "hyper", "hyper-util", "rustls", "rustls-native-certs", + "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", @@ -3644,88 +3606,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" -dependencies = [ - "displaydoc", - "potential_utf", - "utf8_iter", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locale_core" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_normalizer" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" -dependencies = [ - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" - -[[package]] -name = "icu_properties" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" -dependencies = [ - "icu_collections", - "icu_locale_core", - "icu_properties_data", - "icu_provider", - "zerotrie", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" - -[[package]] -name = "icu_provider" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" -dependencies = [ - "displaydoc", - "icu_locale_core", - "writeable", - "yoke", - "zerofrom", - "zerotrie", - "zerovec", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -3745,12 +3625,22 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +checksum = "279259b0ac81c89d11c290495fdcfa96ea3643b7df311c138b6fe8ca5237f0f8" dependencies = [ - "icu_normalizer", - "icu_properties", + "idna_mapping", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna_mapping" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11c13906586a4b339310541a274dd927aff6fcbb5b8e3af90634c4b31681c792" +dependencies = [ + "unicode-joining-type", ] [[package]] @@ -3801,7 +3691,7 @@ dependencies = [ "hyper", "hyper-util", "log", - "rand 0.9.4", + "rand 0.9.2", "tokio", "url", "xmltree", @@ -4102,9 +3992,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.185" +version = "0.2.184" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" [[package]] name = "libm" @@ -4182,7 +4072,7 @@ dependencies = [ "futures-timer", "libp2p-identity", "multiaddr 0.18.2", - "multihash 0.19.4", + "multihash 0.19.3", "multistream-select", "parking_lot", "pin-project", @@ -4271,7 +4161,7 @@ dependencies = [ "bs58", "ed25519-dalek", "hkdf", - "multihash 0.19.4", + "multihash 0.19.3", "quick-protobuf", "rand 0.8.5", "sha2 0.10.9", @@ -4357,7 +4247,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "multiaddr 0.18.2", - "multihash 0.19.4", + "multihash 0.19.3", "quick-protobuf", "rand 0.8.5", "snow", @@ -4597,12 +4487,6 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" -[[package]] -name = "litemap" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" - [[package]] name = "lock_api" version = "0.4.14" @@ -4620,9 +4504,9 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "lru" -version = "0.16.4" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f66e8d5d03f609abc3a39e6f08e4164ebf1447a732906d39eb9b99b7919ef39" +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" dependencies = [ "hashbrown 0.16.1", ] @@ -4720,17 +4604,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "match-lookup" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "matchers" version = "0.2.0" @@ -5095,7 +4968,7 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.4", + "multihash 0.19.3", "percent-encoding", "serde", "static_assertions", @@ -5105,12 +4978,11 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" dependencies = [ "base-x", - "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -5143,11 +5015,11 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.4" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ace881e3f514092ce9efbcb8f413d0ad9763860b828981c2de51ddc666936c" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ - "no_std_io2", + "core2", "unsigned-varint 0.8.0", ] @@ -5200,7 +5072,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ce3636fa715e988114552619582b530481fd5ef176a1e5c1bf024077c2c9445" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "libc", "log", "netlink-packet-core", @@ -5239,21 +5111,12 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "cfg-if", "cfg_aliases", "libc", ] -[[package]] -name = "no_std_io2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a3564ce7035b1e4778d8cb6cacebb5d766b5e8fe5a75b9e441e33fb61a872c6" -dependencies = [ - "memchr", -] - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5531,7 +5394,7 @@ dependencies = [ "lz4", "memmap2", "parking_lot", - "rand 0.9.4", + "rand 0.9.2", "siphasher", "snap", "winapi", @@ -5685,9 +5548,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.33" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19f132c84eca552bf34cab8ec81f1c1dcc229b811638f9d283dceabe58c5569e" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "polling" @@ -5744,15 +5607,6 @@ dependencies = [ "serde", ] -[[package]] -name = "potential_utf" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" -dependencies = [ - "zerovec", -] - [[package]] name = "powerfmt" version = "0.2.0" @@ -5934,9 +5788,9 @@ checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.11.1", + "bitflags 2.11.0", "num-traits", - "rand 0.9.4", + "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax", @@ -6092,7 +5946,7 @@ dependencies = [ "bytes", "getrandom 0.3.99", "lru-slab", - "rand 0.9.4", + "rand 0.9.2", "ring", "rustc-hash", "rustls", @@ -6115,7 +5969,7 @@ dependencies = [ "once_cell", "socket2 0.6.3", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -6153,9 +6007,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.5", @@ -6239,14 +6093,14 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", ] [[package]] name = "rayon" -version = "1.12.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -6281,7 +6135,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", ] [[package]] @@ -6535,7 +6389,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e480426a7d76b458789e4a1be3ffbce9df798f0145f0520c1cdf967755cfcbf" dependencies = [ "alloy-eip7928", - "bitflags 2.11.1", + "bitflags 2.11.0", "revm-bytecode", "revm-primitives", ] @@ -6631,7 +6485,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.4", + "rand 0.9.2", "rlp", "ruint-macro", "serde_core", @@ -6679,7 +6533,7 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys", @@ -6688,9 +6542,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.38" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f9466fb2c14ea04357e91413efb882e2a6d4a406e625449bc0a5d360d53a21" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ "once_cell", "ring", @@ -6724,9 +6578,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.12" +version = "0.103.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" +checksum = "20a6af516fea4b20eccceaf166e8aa666ac996208e8a644ce3ef5aa783bc7cd4" dependencies = [ "ring", "rustls-pki-types", @@ -7271,7 +7125,7 @@ dependencies = [ "multiaddr 0.17.1", "multiaddr 0.18.2", "multihash 0.17.0", - "multihash 0.19.4", + "multihash 0.19.3", "rand 0.8.5", "serde_with", "thiserror 2.0.18", @@ -7715,7 +7569,7 @@ version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -7962,21 +7816,15 @@ dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group", "dkg", - "env_logger", - "rand 0.8.5", - "rand_chacha 0.3.1", + "log", "rand_core 0.6.4", "schnorr-signatures", "serai-coordinator-substrate", "serai-cosign-types", "serai-db", - "serai-env", "serai-primitives", "serai-processor-messages", - "serai-substrate-tests", "serai-task", - "tendermint-machine", - "tokio", "tributary-sdk", "zeroize", ] @@ -9862,7 +9710,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -9896,7 +9744,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.3.99", + "getrandom 0.4.2", "once_cell", "rustix", "windows-sys 0.61.2", @@ -10002,16 +9850,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tinystr" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinyvec" version = "1.11.0" @@ -10029,9 +9867,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.52.0" +version = "1.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91135f59b1cbf38c91e73cf3386fca9bb77915c45ce2771460c9d92f0f3d776" +checksum = "f66bf9585cda4b724d3e78ab34b73fb2bbaba9011b9bfdf69dc836382ea13b8c" dependencies = [ "bytes", "libc", @@ -10178,7 +10016,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "bytes", "http", "http-body", @@ -10367,12 +10205,24 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + [[package]] name = "unicode-ident" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" +[[package]] +name = "unicode-joining-type" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8d00a78170970967fdb83f9d49b92f959ab2bb829186b113e4f4604ad98e180" + [[package]] name = "unicode-normalization" version = "0.1.25" @@ -10382,12 +10232,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" - [[package]] name = "unicode-width" version = "0.2.2" @@ -10573,7 +10417,7 @@ version = "0.245.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f08c9adee0428b7bddf3890fc27e015ac4b761cc608c822667102b8bfd6995e" dependencies = [ - "bitflags 2.11.1", + "bitflags 2.11.0", "hashbrown 0.16.1", "indexmap 2.14.0", "semver", @@ -10599,7 +10443,7 @@ checksum = "ce205cd643d661b5ba5ba4717e13730262e8cdbc8f2eacbc7b906d45c1a74026" dependencies = [ "addr2line", "async-trait", - "bitflags 2.11.1", + "bitflags 2.11.0", "bumpalo", "cc", "cfg-if", @@ -10963,16 +10807,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.5", + "windows-targets", ] [[package]] @@ -10990,31 +10825,14 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - "windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] [[package]] @@ -11032,96 +10850,48 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - [[package]] name = "winnow" version = "0.7.15" @@ -11153,12 +10923,6 @@ dependencies = [ name = "wit-bindgen-rust-macro" version = "0.51.99" -[[package]] -name = "writeable" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" - [[package]] name = "wyz" version = "0.5.1" @@ -11238,7 +11002,7 @@ dependencies = [ "nohash-hasher", "parking_lot", "pin-project", - "rand 0.9.4", + "rand 0.9.2", "static_assertions", "web-time", ] @@ -11252,29 +11016,6 @@ dependencies = [ "time", ] -[[package]] -name = "yoke" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" -dependencies = [ - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", - "synstructure 0.13.2", -] - [[package]] name = "zalloc" version = "0.1.0" @@ -11303,27 +11044,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "zerofrom" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", - "synstructure 0.13.2", -] - [[package]] name = "zeroize" version = "1.8.2" @@ -11344,39 +11064,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "zerotrie" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "zmij" version = "1.0.21" From 93532366b050d6e7abbfe917ca09db14ca464074 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 Apr 2026 18:34:41 -0400 Subject: [PATCH 61/71] Use a `u64` for attempts on the Tributary A `u32`, when incremented by one, is technically feasible to overflow. This PR changed it to return `None` for the next topic if it did overflow, but that'd technically stop creating topics for ongoing protocols. This transformed a panic (which would crash the entire `coordinator`) into a liveness failure for this specific Tributary due to inaccurately claiming there was no next topic (an unsound definition which would become a more localized liveness issue). This uses a `u64` to ensure this state is unreachable when started from zero, and incremented by one, unless so many years pass this is definitively irrelevant. --- coordinator/src/dkg_confirmation.rs | 6 ++--- coordinator/tributary/src/db.rs | 20 +++++++------- coordinator/tributary/src/tests/db.rs | 26 +++++++++---------- coordinator/tributary/src/tests/mod.rs | 12 ++++----- coordinator/tributary/src/transaction.rs | 6 ++--- .../frost-attempt-manager/src/individual.rs | 12 ++++----- processor/messages/src/lib.rs | 2 +- 7 files changed, 41 insertions(+), 43 deletions(-) diff --git a/coordinator/src/dkg_confirmation.rs b/coordinator/src/dkg_confirmation.rs index 2564f17f0..9bc45fe08 100644 --- a/coordinator/src/dkg_confirmation.rs +++ b/coordinator/src/dkg_confirmation.rs @@ -108,9 +108,9 @@ fn handle_frost_error(result: Result) -> Result, share: [u8; 32], machine: Box>, @@ -151,7 +151,7 @@ impl ConfirmDkgTask { fn preprocess( db: &mut CD, set: ExternalValidatorSet, - attempt: u32, + attempt: u64, key: Zeroizing<::F>, signer: &mut Option, ) { diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 1f56f29ad..72a0ca0ff 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -26,7 +26,7 @@ pub enum Topic { /// Participation in the signing protocol to confirm the DKG results on Substrate DkgConfirmation { /// The attempt number this is for - attempt: u32, + attempt: u64, /// The round of the signing protocol round: SigningProtocolRound, }, @@ -39,7 +39,7 @@ pub enum Topic { /// The ID of the signing protocol id: VariantSignId, /// The attempt number this is for - attempt: u32, + attempt: u64, /// The round of the signing protocol round: SigningProtocolRound, }, @@ -63,26 +63,24 @@ impl Topic { match self { Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { - attempt: attempt.checked_add(1)?, + attempt: attempt + 1, round: SigningProtocolRound::Preprocess, }), Topic::SlashReport => None, - Topic::Sign { id, attempt, round: _ } => Some(Topic::Sign { - id, - attempt: attempt.checked_add(1)?, - round: SigningProtocolRound::Preprocess, - }), + Topic::Sign { id, attempt, round: _ } => { + Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess }) + } } } // The topic for the re-attempt to schedule - pub(crate) fn reattempt_topic(self) -> Option<(u32, Topic)> { + pub(crate) fn reattempt_topic(self) -> Option<(u64, Topic)> { #[expect(clippy::match_same_arms)] match self { Topic::RemoveParticipant { .. } => None, Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => { - let next_attempt = attempt.checked_add(1)?; + let next_attempt = attempt + 1; Some(( next_attempt, Topic::DkgConfirmation { @@ -96,7 +94,7 @@ impl Topic { Topic::SlashReport => None, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => { - let next_attempt = attempt.checked_add(1)?; + let next_attempt = attempt + 1; Some(( next_attempt, Topic::Sign { id, attempt: next_attempt, round: SigningProtocolRound::Preprocess }, diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index a0f115fd8..049462705 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -17,20 +17,20 @@ use crate::{ transaction::{RoundPayloads, Preprocess, Share, SigningProtocolRound}, }; -/// One of each topic kind, and attempts: at 0, a random attempt, and u32::MAX. +/// One of each topic kind, and attempts: at 0, a random attempt, and u64::MAX. fn all_topics_and_attempts() -> Vec { - let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); + let random_attempt = OsRng.gen_range(1u64 .. u64::MAX); vec![ // RemoveParticipant Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, // DkgConfirmation Preprocess Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Preprocess }, - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Preprocess }, + Topic::DkgConfirmation { attempt: u64::MAX, round: SigningProtocolRound::Preprocess }, // DkgConfirmation Share Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }, Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Share }, - Topic::DkgConfirmation { attempt: u32::MAX, round: SigningProtocolRound::Share }, + Topic::DkgConfirmation { attempt: u64::MAX, round: SigningProtocolRound::Share }, // SlashReport Topic::SlashReport, // Sign Preprocess @@ -46,7 +46,7 @@ fn all_topics_and_attempts() -> Vec { }, Topic::Sign { id: random_transaction_id(), - attempt: u32::MAX, + attempt: u64::MAX, round: SigningProtocolRound::Preprocess, }, // Sign Share @@ -58,13 +58,13 @@ fn all_topics_and_attempts() -> Vec { }, Topic::Sign { id: random_transaction_id(), - attempt: u32::MAX, + attempt: u64::MAX, round: SigningProtocolRound::Share, }, ] } -/// Share-round topics only, with attempts: at 0, random, and u32::MAX. +/// Share-round topics only, with attempts: at 0, random, and u64::MAX. fn all_share_topics_and_attempts() -> Vec { all_topics_and_attempts() .into_iter() @@ -78,7 +78,7 @@ fn all_share_topics_and_attempts() -> Vec { .collect() } -/// Preprocess-round topics only, with attempts: at 0, random, and u32::MAX. +/// Preprocess-round topics only, with attempts: at 0, random, and u64::MAX. fn all_preprocess_topics_and_attempts() -> Vec { all_topics_and_attempts() .into_iter() @@ -848,10 +848,10 @@ mod tributary_db { let (set, _validator, validators, total_weight, _validator_weight) = default_accumulate_setup(); - // attempt just below u32::MAX so reattempt_topic() returns Some(u32::MAX) + // attempt just below u64::MAX so reattempt_topic() returns Some(u64::MAX) let topic = - Topic::DkgConfirmation { attempt: u32::MAX - 1, round: SigningProtocolRound::Preprocess }; - assert_eq!(topic.reattempt_topic().unwrap().0, u32::MAX); + Topic::DkgConfirmation { attempt: u64::MAX - 1, round: SigningProtocolRound::Preprocess }; + assert_eq!(topic.reattempt_topic().unwrap().0, u64::MAX); // block_number near u64::MAX forces checked_add to overflow let block_number = u64::MAX - 1; @@ -1129,7 +1129,7 @@ mod tributary_db { // Reattempt should be queued if topic is reattemptable. if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { - let blocks_till = u64::from(reattempt_attempt) + let blocks_till = reattempt_attempt .checked_mul(u64::from(BASE_REATTEMPT_DELAY)) .expect("reattempt delay overflowed u64"); let recognize_at = @@ -1235,7 +1235,7 @@ mod tributary_db { let has_preceding_topic_accumulated = OsRng.gen::(); let topic_variant = OsRng.gen_range(0u8 .. 5); - let attempt = OsRng.gen_range(0u32 .. 100); + let attempt = OsRng.gen_range(0u64 .. 100); let round = if OsRng.gen::() { SigningProtocolRound::Preprocess } else { diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 2ebcf9355..a89e3e110 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -63,9 +63,9 @@ pub(crate) fn random_signed(rng: &mut R) -> Signed { Signed { signer: signed.signer, signature: signed.signature } } -/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u32::MAX. +/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u64::MAX. pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec { - let random_attempt = OsRng.gen_range(1u32 .. u32::MAX); + let random_attempt = OsRng.gen_range(1u64 .. u64::MAX); let signed = *signed; vec![ // RemoveParticipant @@ -84,7 +84,7 @@ pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec Vec Vec Vec u32, + Attempted: (session: Session, id: VariantSignId) -> u64, } ); @@ -31,10 +31,10 @@ pub(crate) struct SigningProtocol { id: VariantSignId, // This accepts a vector of `root` machines in order to support signing with multiple key shares. root: Vec, - preprocessed: HashMap, HashMap>)>, + preprocessed: HashMap, HashMap>)>, // Here, we drop to a single machine as we only need one to complete the signature. shared: HashMap< - u32, + u64, ( >::SignatureMachine, HashMap>, @@ -67,7 +67,7 @@ impl SigningProtocol { /// Start a new attempt of the signing protocol. /// /// Returns the (serialized) preprocesses for the attempt. - pub(crate) fn attempt(&mut self, attempt: u32) -> Vec { + pub(crate) fn attempt(&mut self, attempt: u64) -> Vec { /* We'd get slashed as malicious if we: 1) Preprocessed @@ -134,7 +134,7 @@ impl SigningProtocol { /// Returns the (serialized) shares for the attempt. pub(crate) fn preprocesses( &mut self, - attempt: u32, + attempt: u64, serialized_preprocesses: HashMap>, ) -> Vec { log::debug!("handling preprocesses for signing protocol {:?}", self.id); @@ -226,7 +226,7 @@ impl SigningProtocol { /// Returns the signature produced by the protocol. pub(crate) fn shares( &mut self, - attempt: u32, + attempt: u64, serialized_shares: HashMap>, ) -> Result> { log::debug!("handling shares for signing protocol {:?}", self.id); diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 3194a0bfc..496753270 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -176,7 +176,7 @@ pub mod sign { pub struct SignId { pub session: Session, pub id: VariantSignId, - pub attempt: u32, + pub attempt: u64, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] From 17dba2815e90f6ed6ba530211860aceee2254adb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 Apr 2026 18:54:53 -0400 Subject: [PATCH 62/71] Correct the fix for the slash report's length exceeding `f` Only pushing some elements meant that this vector no longer corresponded to the list of validators. Since each element wasn't paired with a validator's ID, it became meaningless. The `assert` which triggered was the underlying fault, and it's been adjusted to only check the amount of non-zero elements is now less than or equal to `f`. --- coordinator/tributary/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 23b44c375..35d6907ae 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -476,18 +476,19 @@ impl ScanBlock<'_, TD, TDT, P> { } let amortized_slash_report = median_slash_report; - // Create the resulting slash report, only including validators who have non-zero - // slash points after amortization + // Create the resulting slash report let mut slash_report = vec![]; for points in amortized_slash_report { // TODO: Natively store this as a `Slash` if points == u32::MAX { slash_report.push(Slash::Fatal); - } else if points > 0 { + } else { slash_report.push(Slash::Points(points)); } } - assert!(slash_report.len() <= f); + assert!( + slash_report.iter().filter(|points| !matches!(points, Slash::Points(0))).count() <= f + ); // Recognize the topic for signing the slash report TributaryDb::recognize_topic( From 0e72ae9bfa1a0804ac293b79c89f622ed8572bc4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Apr 2026 15:08:43 -0400 Subject: [PATCH 63/71] Restore `Topic::required_participation` While it currently _can_ be collapsed to the function `required_participation`, the ability for per-topic thresholds is desired. The calculation is also updated to be infallible. --- coordinator/tributary/src/db.rs | 20 +++++++--- coordinator/tributary/src/tests/db.rs | 38 ++++++------------- coordinator/tributary/src/tests/scan_block.rs | 6 +-- 3 files changed, 28 insertions(+), 36 deletions(-) diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index 72a0ca0ff..f2078201c 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -51,11 +51,6 @@ pub(crate) enum Participating { Everyone, } -pub(crate) fn required_participation(n: u16) -> u16 { - // All of our topics require 2/3rds participation - n.checked_mul(2).unwrap_or_else(|| panic!("required_participation overflowed: {n} * 2")) / 3 + 1 -} - impl Topic { // The topic used by the next attempt of this protocol pub(crate) fn next_attempt_topic(self) -> Option { @@ -206,6 +201,18 @@ impl Topic { } } + pub(crate) fn required_participation(&self, n: u16) -> u16 { + // All of our current topics require 2/3rds participation + let _ = self; + + let wide = u32::from(n); + let fraction_lt_input = + wide.checked_mul(2).expect("widened integer overflowed when multiplied by `2`") / 3; + let result_lte_input = fraction_lt_input + 1; + u16::try_from(result_lte_input) + .expect("value less than or equal to `u16` input wasn't itself valid as a `u16`") + } + pub(crate) fn participating(&self) -> Participating { #[expect(clippy::match_same_arms)] match self { @@ -473,8 +480,9 @@ impl TributaryDb { } } - let required_participation = required_participation(total_weight); + let required_participation = topic.required_participation(total_weight); + // TODO: // The complete lack of validation on the data by these NOPs opens the potential for spam here // If we've already accumulated past the threshold, NOP diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 049462705..8a7f9a1ef 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -132,32 +132,16 @@ where result } -mod required_participation_tests { - use super::*; - - #[test] - fn passes() { - assert_eq!(required_participation(0), 1); - - // No panics - { - let random_n = OsRng.gen_range(0 .. u16::MAX / 2); - let _ = required_participation(random_n); - let _ = required_participation(u16::MAX / 2); - } - } - - #[test] - fn panics_on_overflow() { - let res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - required_participation(u16::MAX); - })); - assert!(res.is_err()); - - let res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - required_participation((u16::MAX / 2) + 1); - })); - assert!(res.is_err()); +#[test] +fn required_participation() { + assert_eq!(Topic::SlashReport.required_participation(0), 1); + assert_eq!(Topic::SlashReport.required_participation(u16::MAX), 43691); + for _ in 0 .. 128 { + #[expect(clippy::as_conversions, clippy::cast_possible_truncation)] + let validators = OsRng.next_u64() as u16; + let required = Topic::SlashReport.required_participation(validators); + assert!(((2 * (validators - required)) + 1) <= required); + assert!((required - ((2 * (validators - required)) + 1)) <= 2); } } @@ -1068,7 +1052,7 @@ mod tributary_db { validator_in_list: bool, result: &DataSet>, ) { - let required = required_participation(total_weight); + let required = topic.required_participation(total_weight); let post_slashed = TributaryDb::is_fatally_slashed(db, set, validator); let post_weight = AccumulatedWeight::get(db, set, topic); diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index e5dc88da8..a6bf83f85 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -769,7 +769,7 @@ mod handle_application_tx { #[test] fn fatal_slash_as_reported_median() { let num_validators = OsRng.gen_range(4u16 .. 10); - let num_reports = usize::from(required_participation(num_validators)); + let num_reports = usize::from(Topic::SlashReport.required_participation(num_validators)); let set = default_test_validator_set(); let (keys_addrs, validator_data, validators, weights, total_weight) = @@ -849,7 +849,7 @@ mod handle_application_tx { for _ in 0 .. 200 { // random even: 4, 6, 8, or 10 let n = OsRng.gen_range(2u16 ..= 5) * 2; - let num_reports = required_participation(n); + let num_reports = Topic::SlashReport.required_participation(n); let set = default_test_validator_set(); @@ -904,7 +904,7 @@ mod handle_application_tx { // random odd: 5, 7, 9, or 11 let n = OsRng.gen_range(2u16 ..= 5) * 2 + 1; let f = usize::from((n - 1) / 3); - let num_reports = required_participation(n); + let num_reports = Topic::SlashReport.required_participation(n); let set = default_test_validator_set(); From 95b4b20aa0498d93f8f9643329f6c24719875e85 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Apr 2026 15:17:09 -0400 Subject: [PATCH 64/71] Restore `to_signed` taking a nonce Moving to `SigningProtocolRound` would be nice if all such transactions fit that model. As shown, not all transactions do. Kludging them into `SigningProtocolRound` isn't preferred, and it appears to have been improperly done for `SigningProtocolRound` (causing a `clippy` lint which was allowed). --- coordinator/tributary/src/tests/transaction.rs | 2 +- coordinator/tributary/src/transaction.rs | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 9180f9840..46a418ea6 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -141,7 +141,7 @@ mod signed { fn to_tributary_signed_matches_signed() { let signed = random_signed(&mut OsRng); for round in all_signing_protocol_rounds() { - let tributary_signed = signed.to_tributary_signed(round); + let tributary_signed = signed.to_tributary_signed(round.nonce()); assert_eq!(signed.signer(), tributary_signed.signer); assert_eq!(signed.signature, tributary_signed.signature); assert_eq!(tributary_signed.nonce, round.nonce()); diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index c5c3e0079..9ce3fc503 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -77,8 +77,8 @@ impl Signed { } /// Provide a nonce to convert a `Signed` into a `tributary::Signed`. - pub(crate) fn to_tributary_signed(self, round: SigningProtocolRound) -> TributarySigned { - TributarySigned { signer: self.signer, nonce: round.nonce(), signature: self.signature } + pub(crate) fn to_tributary_signed(self, nonce: u32) -> TributarySigned { + TributarySigned { signer: self.signer, nonce, signature: self.signature } } } @@ -252,24 +252,23 @@ impl ReadWrite for Transaction { impl TransactionTrait for Transaction { fn kind(&self) -> TransactionKind { - #[expect(clippy::match_same_arms)] match self { Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed( borsh::to_vec(&(b"RemoveParticipant".as_slice(), participant)).unwrap(), - signed.to_tributary_signed(SigningProtocolRound::Preprocess), + signed.to_tributary_signed(0), ), Transaction::DkgParticipation { signed, .. } => TransactionKind::Signed( borsh::to_vec(b"DkgParticipation".as_slice()).unwrap(), - signed.to_tributary_signed(SigningProtocolRound::Preprocess), + signed.to_tributary_signed(0), ), Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(), - signed.to_tributary_signed(SigningProtocolRound::Share), + signed.to_tributary_signed(SigningProtocolRound::Preprocess.nonce()), ), Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(), - signed.to_tributary_signed(SigningProtocolRound::Share), + signed.to_tributary_signed(SigningProtocolRound::Share.nonce()), ), Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"), @@ -279,12 +278,12 @@ impl TransactionTrait for Transaction { Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed( borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(), - signed.to_tributary_signed(*round), + signed.to_tributary_signed(round.nonce()), ), Transaction::SlashReport { signed, .. } => TransactionKind::Signed( borsh::to_vec(b"SlashReport".as_slice()).unwrap(), - signed.to_tributary_signed(SigningProtocolRound::Preprocess), + signed.to_tributary_signed(0), ), } } From da5e85d25ff17f9e395fb72cc29f55e54fd9fd1c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Apr 2026 14:46:41 -0400 Subject: [PATCH 65/71] Ban `borsh::from_reader` for reasons identified by @rafael-xmr --- clippy.toml | 4 ++++ .../tributary/src/tests/transaction.rs | 22 ------------------- coordinator/tributary/src/transaction.rs | 2 +- .../ethereum/src/primitives/transaction.rs | 8 ++++--- 4 files changed, 10 insertions(+), 26 deletions(-) diff --git a/clippy.toml b/clippy.toml index 7071c5b52..355c92bde 100644 --- a/clippy.toml +++ b/clippy.toml @@ -5,6 +5,10 @@ path = "Option::map_or" [[disallowed-methods]] path = "Option::map_or_else" +[[disallowed-methods]] +path = "borsh::from_reader" +reason = "`borsh::from_reader` errors if there's any bytes following the read item, which isn't documented behavior" + # TODO: https://github.com/rust-lang/rust-clippy/pull/12194 [[disallowed-methods]] path = "::add" diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 46a418ea6..64b70469c 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -308,28 +308,6 @@ mod transaction { "reader should be exhausted after reading all transactions" ); } - - /// Counterpart to `sequential_reads_from_shared_reader`: proves `borsh::from_reader` rejects - /// a reader that has leftover bytes, which is why `Transaction::read` must use - /// `deserialize_reader` instead. - #[test] - fn borsh_from_reader_rejects_shared_reader_with_trailing_bytes() { - let txs = all_transactions(); - - let mut buf = Vec::new(); - buf.extend(&u32::try_from(txs.len()).unwrap().to_le_bytes()); - for tx in &txs { - tx.write(&mut buf).unwrap(); - } - - let mut cursor = Cursor::new(&buf); - let mut count = [0u8; 4]; - cursor.read_exact(&mut count).unwrap(); - - // borsh::from_reader should fail because subsequent tx bytes remain after the first - let result: io::Result = borsh::from_reader(&mut cursor); - assert!(result.is_err(), "borsh::from_reader should reject a reader with trailing bytes"); - } } mod kind { diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 9ce3fc503..ac9215927 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -242,7 +242,7 @@ pub enum Transaction { impl ReadWrite for Transaction { fn read(reader: &mut R) -> io::Result { - borsh::BorshDeserialize::deserialize_reader(reader) + Self::deserialize_reader(reader) } fn write(&self, writer: &mut W) -> io::Result<()> { diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs index d91455576..0e9922569 100644 --- a/processor/ethereum/src/primitives/transaction.rs +++ b/processor/ethereum/src/primitives/transaction.rs @@ -1,5 +1,7 @@ use std::io; +use borsh::BorshDeserialize as _; + use ciphersuite_kp256::Secp256k1; use frost::dkg::ThresholdKeys; @@ -122,7 +124,7 @@ impl SignableTransaction for Action { Action::SetKey { chain_id, router_address, nonce, key } } 1 => { - let coin = borsh::from_reader(reader)?; + let coin = <_>::deserialize_reader(reader)?; let mut fee = [0; 32]; reader.read_exact(&mut fee)?; @@ -134,7 +136,7 @@ impl SignableTransaction for Action { let mut outs = vec![]; for _ in 0 .. outs_len { - let address = borsh::from_reader(reader)?; + let address = <_>::deserialize_reader(reader)?; let mut amount = [0; 32]; reader.read_exact(&mut amount)?; @@ -202,7 +204,7 @@ impl primitives::Eventuality for Eventuality { } fn read(reader: &mut impl io::Read) -> io::Result { - Ok(Self(borsh::from_reader(reader)?)) + Ok(Self(<_>::deserialize_reader(reader)?)) } fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { borsh::BorshSerialize::serialize(&self.0, writer) From 709d04edf9da7d82ab863286073797b3bc52eefd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 23 Apr 2026 18:29:23 -0400 Subject: [PATCH 66/71] Tidy the test helpers Some are made more flexible, removing the need for other approximate variants. Test helpers which encoded a non-syntactic semantic context via their name have been removed outright, especially as `substrate/primitives` shouldn't have helpers for Coordinator-specific definitions which may not be universal or applicable to the Substrate context. Similarly, these should likely be moved to `tests/helpers` where we have more flexibility. We want to publish `substrate/primitives` but I couldn't care less about the idea of publishing these test helpers and committing to them under our API in any way. While the `Cargo.toml` already has a comment on the `test-helpers` feature, their own crate entirely avoid any contamination here. --- Cargo.lock | 7 +- coordinator/cosign/src/tests/evaluator.rs | 2 +- coordinator/cosign/types/src/tests/mod.rs | 2 +- coordinator/tributary/src/tests/db.rs | 69 ++++--- coordinator/tributary/src/tests/mod.rs | 47 +++-- coordinator/tributary/src/tests/scan_block.rs | 182 ++++++++---------- .../tributary/src/tests/scan_tributary.rs | 6 +- .../tributary/src/tests/transaction.rs | 20 +- coordinator/tributary/src/tests/tributary.rs | 2 +- substrate/primitives/src/test_helpers.rs | 82 ++++---- 10 files changed, 204 insertions(+), 215 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 117661afe..8495dce8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7835,15 +7835,20 @@ dependencies = [ "ciphersuite 0.4.2", "dalek-ff-group", "dkg", - "log", + "rand 0.8.6", + "rand_chacha 0.3.1", "rand_core 0.6.4", "schnorr-signatures", "serai-coordinator-substrate", "serai-cosign-types", "serai-db", + "serai-env", "serai-primitives", "serai-processor-messages", + "serai-substrate-tests", "serai-task", + "tendermint-machine", + "tokio", "tributary-sdk", "zeroize", ] diff --git a/coordinator/cosign/src/tests/evaluator.rs b/coordinator/cosign/src/tests/evaluator.rs index 7563cd98f..ce272a3b4 100644 --- a/coordinator/cosign/src/tests/evaluator.rs +++ b/coordinator/cosign/src/tests/evaluator.rs @@ -131,7 +131,7 @@ fn signed_cosign( block_hash: random_block_hash(&mut OsRng), cosigner, }, - signature: random_bytes_64(&mut OsRng), + signature: random_bytes(&mut OsRng), } } diff --git a/coordinator/cosign/types/src/tests/mod.rs b/coordinator/cosign/types/src/tests/mod.rs index 6246de3e0..8b748984c 100644 --- a/coordinator/cosign/types/src/tests/mod.rs +++ b/coordinator/cosign/types/src/tests/mod.rs @@ -21,7 +21,7 @@ pub fn random_external_network_id(rng: &mut (impl RngCore + CryptoRng)) -> Exter /// Generate a random global session ID (`[u8; 32]`). pub fn random_global_session(rng: &mut R) -> [u8; 32] { - serai_primitives::test_helpers::random_bytes_32(rng) + serai_primitives::test_helpers::random_bytes(rng) } /// Generate a random [`Cosign`] for testing. diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 8a7f9a1ef..d9b4705ad 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -6,8 +6,7 @@ use serai_primitives::{ address::SeraiAddress, validator_sets::ExternalValidatorSet, test_helpers::{ - random_bytes_32, random_bytes_64, random_serai_address, random_block_number, - default_test_validator_set, random_validator_set, random_vec_u8, random_block_hash, + random_bytes, random_block_hash, random_serai_address, random_validator_set, random_vec_u8, }, }; @@ -35,29 +34,29 @@ fn all_topics_and_attempts() -> Vec { Topic::SlashReport, // Sign Preprocess Topic::Sign { - id: random_transaction_id(), + id: random_variant_sign_id(), attempt: 0, round: SigningProtocolRound::Preprocess, }, Topic::Sign { - id: random_transaction_id(), + id: random_variant_sign_id(), attempt: random_attempt, round: SigningProtocolRound::Preprocess, }, Topic::Sign { - id: random_transaction_id(), + id: random_variant_sign_id(), attempt: u64::MAX, round: SigningProtocolRound::Preprocess, }, // Sign Share - Topic::Sign { id: random_transaction_id(), attempt: 0, round: SigningProtocolRound::Share }, + Topic::Sign { id: random_variant_sign_id(), attempt: 0, round: SigningProtocolRound::Share }, Topic::Sign { - id: random_transaction_id(), + id: random_variant_sign_id(), attempt: random_attempt, round: SigningProtocolRound::Share, }, Topic::Sign { - id: random_transaction_id(), + id: random_variant_sign_id(), attempt: u64::MAX, round: SigningProtocolRound::Share, }, @@ -337,7 +336,7 @@ mod tributary_db { let mut db = MemDb::new(); let set = random_validator_set(&mut OsRng); let block_hash1 = random_block_hash(&mut OsRng); - let block_number1 = random_block_number(&mut OsRng); + let block_number1 = OsRng.next_u64(); let expected_topic = expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number1)); @@ -357,7 +356,7 @@ mod tributary_db { let retry = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = random_block_number(&mut OsRng); + let block_number2 = OsRng.next_u64(); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); })); @@ -385,7 +384,7 @@ mod tributary_db { { let mut txn = db.txn(); let block_hash2 = random_block_hash(&mut OsRng); - let block_number2 = random_block_number(&mut OsRng); + let block_number2 = OsRng.next_u64(); TributaryDb::start_cosigning(&mut txn, set, block_hash2, block_number2); assert_eq!(ActivelyCosigning::get(&txn, set), Some(block_hash2)); @@ -427,7 +426,7 @@ mod tributary_db { // Fresh DB per topic so recognized state doesn't leak between iterations let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); // Randomly select which reattempt topics are queued for this block let reattempts: Vec = @@ -528,11 +527,11 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), share_topic, validator, validator_weight, - &random_bytes_32(&mut OsRng), + &random_bytes(&mut OsRng), ); txn.commit(); @@ -564,7 +563,7 @@ mod tributary_db { set, share_topic.preceding_topic().unwrap(), validator, - &random_bytes_64(&mut OsRng), + &random_bytes(&mut OsRng), ); // Accumulate a share (Share) @@ -574,11 +573,11 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), share_topic, validator, validator_weight, - &random_bytes_32(&mut OsRng), + &random_bytes(&mut OsRng), ); txn.commit(); @@ -615,7 +614,7 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), preprocess_topic, |_| vec![random_vec_u8(&mut OsRng)], None::)>, @@ -628,7 +627,7 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), share_topic, validator, validator_weight, @@ -658,7 +657,7 @@ mod tributary_db { let (set, _validator, validators, total_weight, _validator_weight) = default_accumulate_setup(); let mut db = MemDb::new(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); { let mut txn = db.txn(); @@ -725,16 +724,16 @@ mod tributary_db { let unrelated = Topic::SlashReport; { let mut txn = db.txn(); - let result = TributaryDb::accumulate::( + let result = TributaryDb::accumulate::<[u8; 32]>( &mut txn, set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), unrelated, validators[0], validator_weight, - &random_bytes_32(&mut OsRng), + &random_bytes(&mut OsRng), ); assert!(matches!(result, DataSet::None)); txn.commit(); @@ -743,7 +742,7 @@ mod tributary_db { assert_eq!(AccumulatedWeight::get(&db, set, unrelated), Some(validator_weight)); // Accumulating for our topic proceeds (not NOP'd by unrelated weight) - let data = random_bytes_32(&mut OsRng); + let data = random_bytes(&mut OsRng); { let mut txn = db.txn(); if topic.requires_recognition() { @@ -754,7 +753,7 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), topic, validators[1], validator_weight, @@ -879,7 +878,7 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), topic, |i| [u8::try_from(i).unwrap(); 32], None::, @@ -918,11 +917,11 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), topic, validator, validator_weight, - &random_vec_u8(&mut OsRng), + &random_vec_u8(&mut OsRng, 0 ..= 128), ); // Second call with same (validator, topic) should panic @@ -931,11 +930,11 @@ mod tributary_db { set, &validators, total_weight, - random_block_number(&mut OsRng), + OsRng.next_u64(), topic, validator, validator_weight, - &random_vec_u8(&mut OsRng), + &random_vec_u8(&mut OsRng, 0 ..= 128), ); } @@ -950,7 +949,7 @@ mod tributary_db { default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); TributaryDb::recognize_topic(&mut txn, set, topic); @@ -975,7 +974,7 @@ mod tributary_db { topic, validator, validator_weight, - &random_vec_u8(&mut OsRng), + &random_vec_u8(&mut OsRng, 0 ..= 128), ); } @@ -990,7 +989,7 @@ mod tributary_db { default_accumulate_setup(); let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); accumulate_to_threshold::, _, _>( &mut txn, @@ -1016,7 +1015,7 @@ mod tributary_db { topic, validator, validator_weight, - &random_vec_u8(&mut OsRng), + &random_vec_u8(&mut OsRng, 0 ..= 128), ); assert!(matches!(result, DataSet::None), "should be NOP after threshold"); @@ -1246,7 +1245,7 @@ mod tributary_db { }; let mut db = MemDb::new(); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let validators: Vec = (0 .. num_validators).map(|_i| random_serai_address(&mut OsRng)).collect(); diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index a89e3e110..9ee93b953 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -11,8 +11,7 @@ use rand_core::OsRng; use serai_primitives::{ address::SeraiAddress, test_helpers::{ - random_block_hash, random_bytes_32, random_bytes_64, random_serai_address, random_vec_u8, - default_test_validator_set, + random_bytes, random_block_hash, random_serai_address, random_vec_u8, random_validator_set, }, }; @@ -71,77 +70,77 @@ pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec Vec { Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }, Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }, Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }, - Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }, + Transaction::Batch { hash: random_bytes(&mut OsRng) }, ] } @@ -290,8 +289,8 @@ pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInf } NewSetInformation { - set: default_test_validator_set(), - serai_block: random_bytes_32(&mut OsRng), + set: random_validator_set(&mut OsRng), + serai_block: random_bytes(&mut OsRng), declaration_time: OsRng.next_u64(), threshold: OsRng.gen_range(0 ..= u16::MAX), validators: validators.to_vec(), diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index a6bf83f85..b615a7f47 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -2,9 +2,10 @@ use core::marker::PhantomData; use schnorr::SchnorrSignature; -use serai_db::{Db as _, DbTxn, MemDb}; -use serai_primitives::test_helpers::{random_block_hash, random_block_number, random_vec_of_len}; +use serai_primitives::test_helpers::{random_block_hash, random_vec_u8}; use serai_cosign_types::CosignIntent; + +use serai_db::{Db as _, DbTxn, MemDb}; use tributary_sdk::{ Block, BlockHeader, Transaction as TributaryTransaction, Evidence, tendermint::tx::TendermintTx, }; @@ -107,14 +108,10 @@ fn potentially_start_cosign() { { let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); - let global_session = random_bytes_32(&mut OsRng); + let global_session = random_bytes(&mut OsRng); - let intent = CosignIntent { - global_session, - block_number: random_block_number(&mut OsRng), - block_hash, - notable: false, - }; + let intent = + CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; { let mut txn = db.txn(); @@ -137,7 +134,7 @@ fn potentially_start_cosign() { { let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); - let global_session = random_bytes_32(&mut OsRng); + let global_session = random_bytes(&mut OsRng); { let mut txn = db.txn(); @@ -151,7 +148,7 @@ fn potentially_start_cosign() { block_hash, &CosignIntent { global_session, - block_number: random_block_number(&mut OsRng), + block_number: OsRng.next_u64(), // but the intent's block_hash field is a new_block_hash block_hash: new_block_hash, notable: false, @@ -191,9 +188,9 @@ fn accumulate_dkg_confirmation() { let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.accumulate_dkg_confirmation( - random_block_number(&mut OsRng), + OsRng.next_u64(), Topic::RemoveParticipant { participant: random_serai_address(&mut OsRng) }, - &random_vec_of_len(&mut OsRng, 4), + &random_vec_u8(&mut OsRng, 4 ..= 4), validators[0], ); })); @@ -205,12 +202,12 @@ fn accumulate_dkg_confirmation() { { let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); { - let data1 = random_vec_of_len(&mut OsRng, 4); - let data2 = random_vec_of_len(&mut OsRng, 4); - let data3 = random_vec_of_len(&mut OsRng, 4); + let data1 = random_vec_u8(&mut OsRng, 4 ..= 4); + let data2 = random_vec_u8(&mut OsRng, 4 ..= 4); + let data3 = random_vec_u8(&mut OsRng, 4 ..= 4); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); @@ -244,7 +241,7 @@ fn accumulate_dkg_confirmation() { weights_4.insert(v4, 1); let set_info_4 = new_test_set_info(&validator_data_4); - let data4 = random_vec_of_len(&mut OsRng, 4); + let data4 = random_vec_u8(&mut OsRng, 4 ..= 4); { let mut scan_block = new_scan_block(&mut txn, &set_info_4, &validators_4, 4, &weights_4); @@ -262,7 +259,7 @@ mod handle_application_tx { #[test] fn dont_handle_signed_kind_from_fatally_slashed() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -282,7 +279,7 @@ mod handle_application_tx { { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx(random_block_number(&mut OsRng), tx.clone()); + scan_block.handle_application_tx(OsRng.next_u64(), tx.clone()); } assert!( @@ -294,7 +291,7 @@ mod handle_application_tx { #[test] fn remove_participant() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -309,7 +306,7 @@ mod handle_application_tx { let nonexistent = random_serai_address(&mut OsRng); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::RemoveParticipant { participant: nonexistent, signed: Signed::default() }, ); @@ -325,7 +322,7 @@ mod handle_application_tx { let (key2, _) = keys_addrs[2]; let target = addr0; - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); let mut db = MemDb::new(); let mut txn = db.txn(); @@ -370,7 +367,7 @@ mod handle_application_tx { fn dkg_participation() { let mut db = MemDb::new(); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -381,7 +378,7 @@ mod handle_application_tx { { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::DkgParticipation { participation: vec![1, 2, 3], signed: new_signed(signer_key), @@ -394,7 +391,7 @@ mod handle_application_tx { #[test] fn dkg_confirmation_preprocess() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -402,7 +399,7 @@ mod handle_application_tx { let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); // Below threshold: no DkgConfirmationMessages sent { @@ -412,7 +409,7 @@ mod handle_application_tx { block_number, Transaction::DkgConfirmationPreprocess { attempt: 0, - preprocess: random_bytes_64(&mut OsRng), + preprocess: random_bytes(&mut OsRng), signed: new_signed(key0), }, ); @@ -427,7 +424,7 @@ mod handle_application_tx { block_number, Transaction::DkgConfirmationPreprocess { attempt: 0, - preprocess: random_bytes_64(&mut OsRng), + preprocess: random_bytes(&mut OsRng), signed: new_signed(key), }, ); @@ -438,7 +435,7 @@ mod handle_application_tx { #[test] fn dkg_confirmation_share() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -452,10 +449,10 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::DkgConfirmationShare { attempt: 0, - share: random_bytes_32(&mut OsRng), + share: random_bytes(&mut OsRng), signed: new_signed(key0), }, ); @@ -469,7 +466,7 @@ mod handle_application_tx { // Full preprocess->share flow let mut db = MemDb::new(); let mut txn = db.txn(); - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); // All 3 validators submit preprocesses (threshold crossed -> DkgConfirmationMessages sent) { @@ -479,7 +476,7 @@ mod handle_application_tx { block_number, Transaction::DkgConfirmationPreprocess { attempt: 0, - preprocess: random_bytes_64(&mut OsRng), + preprocess: random_bytes(&mut OsRng), signed: new_signed(key), }, ); @@ -497,7 +494,7 @@ mod handle_application_tx { block_number, Transaction::DkgConfirmationShare { attempt: 0, - share: random_bytes_32(&mut OsRng), + share: random_bytes(&mut OsRng), signed: new_signed(key0), }, ); @@ -515,7 +512,7 @@ mod handle_application_tx { block_number, Transaction::DkgConfirmationShare { attempt: 0, - share: random_bytes_32(&mut OsRng), + share: random_bytes(&mut OsRng), signed: new_signed(key), }, ); @@ -529,20 +526,16 @@ mod handle_application_tx { #[test] fn cosign() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let block_hash = random_block_hash(&mut OsRng); - let global_session = random_bytes_32(&mut OsRng); + let global_session = random_bytes(&mut OsRng); - let intent = CosignIntent { - global_session, - block_number: random_block_number(&mut OsRng), - block_hash, - notable: false, - }; + let intent = + CosignIntent { global_session, block_number: OsRng.next_u64(), block_hash, notable: false }; // Sets LatestSubstrateBlockToCosign and starts cosigning { @@ -557,7 +550,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Cosign { substrate_block_hash: block_hash }, ); @@ -582,7 +575,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Cosign { substrate_block_hash: second_hash }, ); @@ -593,7 +586,7 @@ mod handle_application_tx { #[test] fn cosigned() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); @@ -608,7 +601,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Cosigned { substrate_block_hash: block_hash }, ); } @@ -634,7 +627,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Cosigned { substrate_block_hash: block_hash }, ); } @@ -658,7 +651,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Cosigned { substrate_block_hash: other_hash }, ); } @@ -669,14 +662,14 @@ mod handle_application_tx { #[test] fn substrate_block() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); - let plans = vec![random_bytes_32(&mut OsRng), random_bytes_32(&mut OsRng)]; + let plans = vec![random_bytes(&mut OsRng), random_bytes(&mut OsRng)]; { let mut txn = db.txn(); @@ -687,10 +680,8 @@ mod handle_application_tx { let mut txn = db.txn(); { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - random_block_number(&mut OsRng), - Transaction::SubstrateBlock { hash: block_hash }, - ); + scan_block + .handle_application_tx(OsRng.next_u64(), Transaction::SubstrateBlock { hash: block_hash }); } for plan in &plans { @@ -701,21 +692,18 @@ mod handle_application_tx { #[test] fn batch() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let mut db = MemDb::new(); - let batch_hash = random_bytes_32(&mut OsRng); + let batch_hash = random_bytes(&mut OsRng); let mut txn = db.txn(); { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - random_block_number(&mut OsRng), - Transaction::Batch { hash: batch_hash }, - ); + scan_block.handle_application_tx(OsRng.next_u64(), Transaction::Batch { hash: batch_hash }); } let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(batch_hash)); @@ -733,7 +721,7 @@ mod handle_application_tx { wrong_len = if wrong_len == 1 { 2 } else { wrong_len - 1 }; } - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(num_validators); @@ -748,7 +736,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::SlashReport { slash_points: vec![0; usize::from(wrong_len)], signed: new_signed(signer_key), @@ -771,7 +759,7 @@ mod handle_application_tx { let num_validators = OsRng.gen_range(4u16 .. 10); let num_reports = usize::from(Topic::SlashReport.required_participation(num_validators)); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); @@ -789,7 +777,7 @@ mod handle_application_tx { for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, ); } @@ -851,7 +839,7 @@ mod handle_application_tx { let n = OsRng.gen_range(2u16 ..= 5) * 2; let num_reports = Topic::SlashReport.required_participation(n); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(n); @@ -869,7 +857,7 @@ mod handle_application_tx { for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, ); } @@ -906,7 +894,7 @@ mod handle_application_tx { let f = usize::from((n - 1) / 3); let num_reports = Topic::SlashReport.required_participation(n); - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(n); @@ -924,7 +912,7 @@ mod handle_application_tx { for (i, report) in reports.iter().enumerate() { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, ); } @@ -949,14 +937,14 @@ mod handle_application_tx { #[test] fn sign() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let (key0, addr0) = keys_addrs[0]; let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); - let sign_id = VariantSignId::Transaction(random_bytes_32(&mut OsRng)); + let sign_id = VariantSignId::Transaction(random_bytes(&mut OsRng)); let topic = expected_initially_recognized_sign_topic(sign_id); // Wrong data length: signer has weight 1 but submits 2 entries -> fatal slash @@ -967,7 +955,7 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Sign { id: sign_id, attempt: 0, @@ -991,7 +979,7 @@ mod handle_application_tx { new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( - random_block_number(&mut OsRng), + OsRng.next_u64(), Transaction::Sign { id: sign_id, attempt: 0, @@ -1012,13 +1000,13 @@ mod handle_application_tx { /// and stores preceding data), then accumulating shares to threshold. #[test] fn sign_share_sends_shares_message() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); - let sign_id = VariantSignId::Transaction(random_bytes_32(&mut OsRng)); + let sign_id = VariantSignId::Transaction(random_bytes(&mut OsRng)); let preprocess_topic = expected_initially_recognized_sign_topic(sign_id); let share_topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Share }; @@ -1031,7 +1019,7 @@ mod handle_application_tx { // Step 1: All validators submit preprocesses, crossing threshold. // This auto-recognizes the Share topic (succeeding_topic) and stores preprocess data. { - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( @@ -1055,7 +1043,7 @@ mod handle_application_tx { // Step 2: All validators submit shares, crossing threshold -> sends Shares message. { - let block_number = random_block_number(&mut OsRng); + let block_number = OsRng.next_u64(); let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); for key in [key0, key1, key2] { scan_block.handle_application_tx( @@ -1084,7 +1072,7 @@ mod handle_application_tx { #[test] fn handle_block() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(3); let set_info = new_test_set_info(&validator_data); @@ -1097,15 +1085,15 @@ fn handle_block() { let mut txn = db.txn(); let block = Block { header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), + parent: random_bytes(&mut OsRng), + transactions: random_bytes(&mut OsRng), }, transactions: vec![], }; { let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + scan_block.handle_block(OsRng.next_u64(), block); } assert_no_pending_messages(&mut txn, set); } @@ -1120,15 +1108,15 @@ fn handle_block() { let block_txs = vec![TributaryTransaction::Application(tx)]; let block = Block { header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), + parent: random_bytes(&mut OsRng), + transactions: random_bytes(&mut OsRng), }, transactions: block_txs.clone(), }; { let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + scan_block.handle_block(OsRng.next_u64(), block); } assert_block_side_effects(&mut txn, set, &block_txs); } @@ -1145,15 +1133,15 @@ fn handle_block() { &mut txn, set, &CosignIntent { - global_session: random_bytes_32(&mut OsRng), - block_number: random_block_number(&mut OsRng), + global_session: random_bytes(&mut OsRng), + block_number: OsRng.next_u64(), block_hash: *substrate_block_hash, notable: false, }, ); } Transaction::SubstrateBlock { hash } => { - let plans = vec![random_bytes_32(&mut OsRng)]; + let plans = vec![random_bytes(&mut OsRng)]; SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); } Transaction::RemoveParticipant { .. } | @@ -1169,15 +1157,15 @@ fn handle_block() { let block_txs = vec![TributaryTransaction::Application(tx)]; let block = Block { header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), + parent: random_bytes(&mut OsRng), + transactions: random_bytes(&mut OsRng), }, transactions: block_txs.clone(), }; { let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + scan_block.handle_block(OsRng.next_u64(), block); } assert_block_side_effects(&mut txn, set, &block_txs); } @@ -1199,8 +1187,8 @@ fn handle_block() { let block = Block { header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), + parent: random_bytes(&mut OsRng), + transactions: random_bytes(&mut OsRng), }, transactions: vec![TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(evidence))], }; @@ -1244,7 +1232,7 @@ fn handle_block() { has_evidence = true; } else { // Random application transaction, use Batch so we can assert recognition - let hash = random_bytes_32(&mut OsRng); + let hash = random_bytes(&mut OsRng); batch_hashes.push(hash); transactions.push(TributaryTransaction::Application(Transaction::Batch { hash })); } @@ -1252,15 +1240,15 @@ fn handle_block() { let block = Block { header: BlockHeader { - parent: random_bytes_32(&mut OsRng), - transactions: random_bytes_32(&mut OsRng), + parent: random_bytes(&mut OsRng), + transactions: random_bytes(&mut OsRng), }, transactions: transactions.clone(), }; { let scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_block(random_block_number(&mut OsRng), block); + scan_block.handle_block(OsRng.next_u64(), block); } if has_evidence { diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs index 03f220dfc..6b764f442 100644 --- a/coordinator/tributary/src/tests/scan_tributary.rs +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -153,13 +153,13 @@ async fn scan_tributary_task_run_iteration() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; let (last_handled_block_number, last_handled_block_hash) = - TributaryDb::last_handled_tributary_block(&db, default_test_validator_set()).unwrap(); + TributaryDb::last_handled_tributary_block(&db, set_info.set).unwrap(); assert!(last_handled_block_number >= 1, "expected at least block 1 to be handled"); // Processes block with provided and signed txs - inject after the actual last handled block { let batch_tx = - TributaryTransaction::Application(Transaction::Batch { hash: random_bytes_32(&mut OsRng) }); + TributaryTransaction::Application(Transaction::Batch { hash: random_bytes(&mut OsRng) }); let fake_evidence = TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( Evidence::InvalidPrecommit(make_signed_message_bytes(addr.0)), )); @@ -183,7 +183,7 @@ async fn scan_tributary_task_run_iteration() { TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; let mut txn = db.txn(); - assert_block_side_effects(&mut txn, default_test_validator_set(), &block_txs); + assert_block_side_effects(&mut txn, set_info.set, &block_txs); } // Errors when locally provided txs are missing diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 64b70469c..0fd5643de 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -268,7 +268,7 @@ mod transaction { assert_eq!(tx, deserialized); if let TransactionKind::Signed(_, _) = tx.kind() { - tx.sign(&mut OsRng, random_genesis(&mut OsRng), &random_key(&mut OsRng)); + tx.sign(&mut OsRng, random_bytes(&mut OsRng), &random_key(&mut OsRng)); let serialized = ReadWrite::serialize(&tx); let deserialized = Transaction::read(&mut serialized.as_slice()).unwrap(); assert_eq!(tx, deserialized, "ReadWrite failed after signing for {tx:?}"); @@ -316,7 +316,7 @@ mod transaction { #[test] fn signed_transactions_match_kind_and_nonce_and_sig() { let key = random_key(&mut OsRng); - let genesis = random_genesis(&mut OsRng); + let genesis = random_bytes(&mut OsRng); /// Borsh-encodes a byte-string label: `len(4 LE) || label` fn borsh_label(label: &[u8]) -> Vec { @@ -430,7 +430,7 @@ mod transaction { #[test] fn hash_format_and_determinism() { let key = random_key(&mut OsRng); - let genesis = random_genesis(&mut OsRng); + let genesis = random_bytes(&mut OsRng); for tx in all_transactions() { assert_eq!(tx.hash(), tx.hash(), "Hash not deterministic for {tx:?}"); @@ -584,7 +584,7 @@ mod transaction { fn tx_sign() { let key = random_key(&mut OsRng); let expected_signer = Ristretto::generator() * key.deref(); - let genesis = random_genesis(&mut OsRng); + let genesis = random_bytes(&mut OsRng); // Sets correct signer and produces verifiable signature for mut tx in all_signed_transactions_and_attempts(&random_signed(&mut OsRng)) { @@ -606,10 +606,10 @@ mod transaction { participant: random_serai_address(&mut OsRng), signed: random_signed(&mut OsRng), }; - let genesis = random_genesis(&mut OsRng); + let genesis = random_bytes(&mut OsRng); tx.sign(&mut OsRng, genesis, &key); - let mut wrong_genesis = random_genesis(&mut OsRng); + let mut wrong_genesis = random_bytes(&mut OsRng); // guaranteed to be the wrong genesis if wrong_genesis == genesis { wrong_genesis[0] ^= 1; @@ -629,7 +629,7 @@ mod transaction { fn panics_on_cosign() { let key = random_key(&mut OsRng); let mut tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + tx.sign(&mut OsRng, random_bytes(&mut OsRng), &key); } #[test] @@ -637,7 +637,7 @@ mod transaction { fn panics_on_cosigned() { let key = random_key(&mut OsRng); let mut tx = Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + tx.sign(&mut OsRng, random_bytes(&mut OsRng), &key); } #[test] @@ -645,7 +645,7 @@ mod transaction { fn panics_on_substrate_block() { let key = random_key(&mut OsRng); let mut tx = Transaction::SubstrateBlock { hash: random_block_hash(&mut OsRng) }; - tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + tx.sign(&mut OsRng, random_bytes(&mut OsRng), &key); } #[test] @@ -653,7 +653,7 @@ mod transaction { fn panics_on_batch() { let key = random_key(&mut OsRng); let mut tx = Transaction::Batch { hash: random_block_hash(&mut OsRng).0 }; - tx.sign(&mut OsRng, random_genesis(&mut OsRng), &key); + tx.sign(&mut OsRng, random_bytes(&mut OsRng), &key); } } } diff --git a/coordinator/tributary/src/tests/tributary.rs b/coordinator/tributary/src/tests/tributary.rs index 567ee6dbe..e4f441d55 100644 --- a/coordinator/tributary/src/tests/tributary.rs +++ b/coordinator/tributary/src/tests/tributary.rs @@ -20,7 +20,7 @@ fn unwrap_slash_report(tx: Transaction) -> (Vec, Signed) { #[test] fn slash_report() { - let set = default_test_validator_set(); + let set = random_validator_set(&mut OsRng); // No slash points set: all zeros { diff --git a/substrate/primitives/src/test_helpers.rs b/substrate/primitives/src/test_helpers.rs index 2776f1980..0026b459e 100644 --- a/substrate/primitives/src/test_helpers.rs +++ b/substrate/primitives/src/test_helpers.rs @@ -1,5 +1,6 @@ //! Test helpers for generating random instances of primitive types. +use core::ops::{Bound, RangeBounds}; use alloc::{vec, vec::Vec}; use rand_core::{RngCore, CryptoRng}; @@ -9,36 +10,53 @@ use crate::{ address::{SeraiAddress, ExternalAddress}, crypto::{Public, ExternalKey}, network_id::ExternalNetworkId, - validator_sets::{ExternalValidatorSet, Session}, + validator_sets::{Session, ExternalValidatorSet}, }; -/// Generate a random 32-byte array. -pub fn random_bytes_32(rng: &mut R) -> [u8; 32] { - let mut bytes = [0u8; 32]; +/// Generate a random byte array. +pub fn random_bytes(rng: &mut R) -> [u8; N] { + let mut bytes = [0u8; N]; rng.fill_bytes(&mut bytes); bytes } -/// Generate a random 64-byte array. -pub fn random_bytes_64(rng: &mut R) -> [u8; 64] { - let mut bytes = [0u8; 64]; - rng.fill_bytes(&mut bytes); - bytes -} - -/// Generate a random `Vec` with a random length between 1 and 128. -pub fn random_vec_u8(rng: &mut R) -> Vec { - let len = usize::try_from(rng.next_u32() % 128).unwrap() + 1; - random_vec_of_len(rng, len) -} +/// Generate a random byte vector of a length within a range. +pub fn random_vec_u8(rng: &mut R, len: impl RangeBounds) -> Vec { + let len = { + let inclusive_start = match len.start_bound() { + Bound::Included(start) => *start, + Bound::Excluded(start) => start + 1, + Bound::Unbounded => 0, + }; + let inclusive_end = match len.end_bound() { + Bound::Included(end) => *end, + Bound::Excluded(end) => end - 1, + Bound::Unbounded => panic!("do not request a random vector of unbounded length"), + }; + let range_len = inclusive_end + .checked_sub(inclusive_start) + .expect("requested a random vector for a length within a range with no elements") + + 1; + let i = usize::try_from(rng.next_u64() % u64::try_from(range_len).unwrap()).unwrap(); + inclusive_start + i + }; -/// Generate a random byte vector of a specific length. -pub fn random_vec_of_len(rng: &mut R, len: usize) -> Vec { let mut bytes = vec![0u8; len]; rng.fill_bytes(&mut bytes); bytes } +#[test] +fn random_vec_u8_handles_ranges_correctly() { + use rand_core::OsRng; + for _ in 0 .. 128 { + assert_eq!(random_vec_u8(&mut OsRng, 0 ..= 0).len(), 0); + assert_eq!(random_vec_u8(&mut OsRng, 0 .. 1).len(), 0); + assert_eq!(random_vec_u8(&mut OsRng, ..= 0).len(), 0); + assert_eq!(random_vec_u8(&mut OsRng, .. 1).len(), 0); + } +} + /// Generate a random [`ExternalAddress`]. pub fn random_external_address(rng: &mut R) -> ExternalAddress { let len = usize::try_from(rng.next_u32() % ExternalAddress::MAX_SIZE).unwrap(); @@ -56,12 +74,12 @@ fn random_external_address_is_in_range() { /// Generate a random [`SeraiAddress`]. pub fn random_serai_address(rng: &mut R) -> SeraiAddress { - SeraiAddress(random_bytes_32(rng)) + SeraiAddress(random_bytes(rng)) } /// Generate a random [`Public`]. pub fn random_public(rng: &mut R) -> Public { - Public(random_bytes_32(rng)) + Public(random_bytes(rng)) } /// Generate a random schnorrkel keypair and its [`Public`] wrapper. @@ -88,28 +106,13 @@ fn random_external_key_is_in_range() { /// Generate a random [`BlockHash`]. pub fn random_block_hash(rng: &mut R) -> BlockHash { - BlockHash(random_bytes_32(rng)) -} - -/// Generate a random global session ID (`[u8; 32]`). -pub fn random_global_session(rng: &mut R) -> [u8; 32] { - random_bytes_32(rng) -} - -/// Generate a random genesis -pub fn random_genesis(rng: &mut R) -> [u8; 32] { - random_bytes_32(rng) -} - -/// Generate a random block number. -pub fn random_block_number(rng: &mut R) -> u64 { - rng.next_u64() + BlockHash(random_bytes(rng)) } /// Generate a random [`ExternalNetworkId`]. pub fn random_external_network_id(rng: &mut R) -> ExternalNetworkId { let all: Vec<_> = ExternalNetworkId::all().collect(); - all[usize::try_from(rng.next_u32()).unwrap() % all.len()] + all[usize::try_from(rng.next_u64() % u64::try_from(all.len()).unwrap()).unwrap()] } /// Generate a random [`ExternalValidatorSet`]. @@ -119,8 +122,3 @@ pub fn random_validator_set(rng: &mut R) -> ExternalVali session: Session(rng.next_u32()), } } - -/// A default [`ExternalValidatorSet`] for tests where the set value doesn't matter. -pub fn default_test_validator_set() -> ExternalValidatorSet { - ExternalValidatorSet { network: ExternalNetworkId::Bitcoin, session: Session(0) } -} From 6d64dd1fe4f25a89b9e247d8a00087d1ccb8dec7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Apr 2026 15:12:00 -0400 Subject: [PATCH 67/71] Remove the `Preprocess`, `Share`, `GenericSignPayload`, `RoundPayloads` type aliases I understand the intent. Unfortunately, these definitions were incorrect. For a FROST protocol over Ristretto, the preprocess will be 64 bytes and the share 32 bytes. This is used for the DKG confirmation, batches, where the former hard-coded such a definition. For batches, the process is routed via `VariantSignId` where the same processing occurs for transactions. Signing transactions on external networks has a preprocess, share, of length variable to the external network and its signing protocol. For Ethereum, it'd be a 66-byte preprocess and 32-byte share. For Bitcoin, it's a 64-byte preprocess and 32-byte share _per input_ (concatenated into a single byte blob). For Monero, it's a 160-byte preprocess and 32-byte share _per input_ (again so concatenated). --- coordinator/tributary/src/tests/db.rs | 42 ++++++++++++------------ coordinator/tributary/src/transaction.rs | 21 +++--------- 2 files changed, 25 insertions(+), 38 deletions(-) diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index d9b4705ad..8a47e419a 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -91,7 +91,7 @@ fn all_preprocess_topics_and_attempts() -> Vec { .collect() } -type NoEachFn = fn(usize, &DataSet); +type NoEachFn = fn(usize, &DataSet<[u8; 32]>); /// Cross threshold by accumulating from all validators, returning the final result. #[expect(clippy::too_many_arguments)] @@ -522,7 +522,7 @@ mod tributary_db { // Do not store any preceding Preprocess data // Validator should be slashed with reason: // "participated in topic without participating in prior" - let result = TributaryDb::accumulate::( + let result = TributaryDb::accumulate::<[u8; 32]>( &mut txn, set, &validators, @@ -557,8 +557,8 @@ mod tributary_db { TributaryDb::recognize_topic(&mut txn, set, share_topic); } - // Store preceding preprocess data (Preprocess) - Accumulated::::set( + // Store preceding preprocess data + Accumulated::<[u8; 64]>::set( &mut txn, set, share_topic.preceding_topic().unwrap(), @@ -566,9 +566,9 @@ mod tributary_db { &random_bytes(&mut OsRng), ); - // Accumulate a share (Share) + // Accumulate a share // The preceding check should find the key despite the type mismatch and NOT slash. - let result = TributaryDb::accumulate::( + let result = TributaryDb::accumulate::<[u8; 32]>( &mut txn, set, &validators, @@ -586,10 +586,10 @@ mod tributary_db { // Below threshold (1 of 3) so result is None but data is stored assert!(matches!(result, DataSet::None)); // Confirm data is stored - assert!(Accumulated::::get(&db, set, share_topic, validator).is_some()); + assert!(Accumulated::<[u8; 32]>::get(&db, set, share_topic, validator).is_some()); } - // Same types: stores type of RoundPayloads for both Preprocess and Share. + // Same types: stores type of Vec> for both Preprocess and Share. // Only topics where the preprocess data survives after threshold // (reattempt exists). for share_topic in all_share_topics_and_attempts() @@ -616,13 +616,13 @@ mod tributary_db { total_weight, OsRng.next_u64(), preprocess_topic, - |_| vec![random_vec_u8(&mut OsRng)], - None::)>, + |_| vec![random_vec_u8(&mut OsRng, 0 ..= 128)], + None::>>)>, ); - // Accumulate a share with the same RoundPayloads type - let share_data: RoundPayloads = vec![random_vec_u8(&mut OsRng)]; - let result = TributaryDb::accumulate::( + // Accumulate a share with the same Vec> type + let share_data: Vec> = vec![random_vec_u8(&mut OsRng, 0 ..= 128)]; + let result = TributaryDb::accumulate::>>( &mut txn, set, &validators, @@ -641,7 +641,7 @@ mod tributary_db { ); assert!(matches!(result, DataSet::None), "below threshold (1 of 3)"); assert_eq!( - Accumulated::::get(&db, set, share_topic, validator), + Accumulated::>>::get(&db, set, share_topic, validator), Some(share_data) ); } @@ -673,7 +673,7 @@ mod tributary_db { block_number, topic, |i| [u8::try_from(i).unwrap(); 32], - Some(|i: usize, result: &DataSet| { + Some(|i: usize, result: &DataSet<[u8; 32]>| { if i < 2 { assert!(matches!(result, DataSet::None)); } else { @@ -695,12 +695,12 @@ mod tributary_db { assert!(!TributaryDb::is_fatally_slashed(&db, set, *v)); if has_reattempt { assert!( - Accumulated::::get(&db, set, topic, *v).is_some(), + Accumulated::<[u8; 32]>::get(&db, set, topic, *v).is_some(), "data should be preserved when reattempt exists: {topic:?}" ); } else { assert!( - Accumulated::::get(&db, set, topic, *v).is_none(), + Accumulated::<[u8; 32]>::get(&db, set, topic, *v).is_none(), "data should be cleaned up when no reattempt: {topic:?}" ); } @@ -748,7 +748,7 @@ mod tributary_db { if topic.requires_recognition() { TributaryDb::recognize_topic(&mut txn, set, topic); } - let result = TributaryDb::accumulate::( + let result = TributaryDb::accumulate::<[u8; 32]>( &mut txn, set, &validators, @@ -764,7 +764,7 @@ mod tributary_db { } // Data was stored (not NOP'd) - assert_eq!(Accumulated::::get(&db, set, topic, validators[1]), Some(data)); + assert_eq!(Accumulated::<[u8; 32]>::get(&db, set, topic, validators[1]), Some(data)); assert_eq!(AccumulatedWeight::get(&db, set, topic), Some(validator_weight)); } } @@ -804,7 +804,7 @@ mod tributary_db { if topic.reattempt_topic().is_some() { for (i, v) in validators.iter().enumerate() { assert_eq!( - Accumulated::::get(&db, set, topic, *v), + Accumulated::<[u8; 32]>::get(&db, set, topic, *v), Some([u8::try_from(i).unwrap(); 32]), "data should be preserved when reattempt exists: {topic:?}" ); @@ -816,7 +816,7 @@ mod tributary_db { ); for v in &validators { assert!( - Accumulated::::get(&db, set, topic, *v).is_none(), + Accumulated::<[u8; 32]>::get(&db, set, topic, *v).is_none(), "data should be cleaned up when no reattempt: {topic:?}" ); } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index ac9215927..53cc229b4 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -94,18 +94,6 @@ impl Default for Signed { } } -/// The type used for preprocess payloads in the signing protocol. -pub type Preprocess = [u8; 64]; -/// The type used for share payloads in the signing protocol. -pub type Share = [u8; 32]; -/// A generic, less constrained type used for either share or preprocess payloads -/// in the signing protocol. -pub type GenericSignPayload = Vec; -/// One serialized payload per key share held by the sending validator. -/// The outer Vec has one entry per key share; each inner Vec is a -/// serialized preprocess (64 bytes) or share (32 bytes), depending on `round`. -pub type RoundPayloads = Vec; - /// The Tributary transaction definition used by Serai #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum Transaction { @@ -129,7 +117,7 @@ pub enum Transaction { /// The attempt number of this signing protocol attempt: u64, /// The preprocess - preprocess: Preprocess, + preprocess: [u8; 64], /// The transaction's signer and signature signed: Signed, }, @@ -138,7 +126,7 @@ pub enum Transaction { /// The attempt number of this signing protocol attempt: u64, /// The signature share - share: Share, + share: [u8; 32], /// The transaction's signer and signature signed: Signed, }, @@ -224,9 +212,8 @@ pub enum Transaction { /// The data itself /// /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending - /// this transaction has, and each blob is a serialized preprocess (64 bytes) or share - /// (32 bytes), uniform across all entries as determined by `round`. - data: RoundPayloads, + /// this transaction has. + data: Vec>, /// The transaction's signer and signature signed: Signed, }, From 84e50fccbc515785fbd50ca158e42b5c547f2e3e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Apr 2026 18:12:45 -0400 Subject: [PATCH 68/71] Redefine how Tributary genesis values are decided The tests in `coordinator/tributary/src/tests/scan_tributary.rs` demonstrated creating a tributary with one `NewSetInformation`, before reading from it with another, which should be obviously invalid? `NewSetInformation::genesis` has been added to make the genesis deterministic and binding to the `NewSetInformation`, allowing the `ScanTributaryTask` to check its initialized with values consistent to how the Tributary itself was initialized. This updates the cited tests accordingly. --- Cargo.lock | 1 + coordinator/src/tributary.rs | 4 +- coordinator/substrate/Cargo.toml | 1 + coordinator/substrate/src/lib.rs | 35 ++++ coordinator/tributary/src/lib.rs | 8 + coordinator/tributary/src/tests/mod.rs | 43 ++--- .../tributary/src/tests/scan_tributary.rs | 165 +++++++++--------- 7 files changed, 149 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8495dce8c..3569a0585 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7816,6 +7816,7 @@ dependencies = [ name = "serai-coordinator-substrate" version = "0.1.0" dependencies = [ + "blake2 0.11.0-rc.6", "borsh", "dkg", "futures", diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs index f23ee95b8..60495113b 100644 --- a/coordinator/src/tributary.rs +++ b/coordinator/src/tributary.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use zeroize::Zeroizing; use rand_core::OsRng; -use blake2::{digest::typenum::U32, Digest as _, Blake2s}; use ciphersuite::*; use dalek_ff_group::Ristretto; @@ -482,8 +481,7 @@ pub(crate) async fn spawn_tributary( return; } - let genesis = - <[u8; 32]>::from(Blake2s::::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap())); + let genesis = set.tributary_genesis(); // Since the Serai block will be finalized, then cosigned, before we handle this, this time will // be a couple of minutes stale. While the Tributary will still function with a start time in the diff --git a/coordinator/substrate/Cargo.toml b/coordinator/substrate/Cargo.toml index cfe31b423..a4c0c81bd 100644 --- a/coordinator/substrate/Cargo.toml +++ b/coordinator/substrate/Cargo.toml @@ -19,6 +19,7 @@ workspace = true [dependencies] borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +blake2 = { version = "0.11.0-rc.5", default-features = false, features = ["alloc"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } serai-client-serai = { path = "../../substrate/client/serai", default-features = false } diff --git a/coordinator/substrate/src/lib.rs b/coordinator/substrate/src/lib.rs index e6aa70465..e0f5414e7 100644 --- a/coordinator/substrate/src/lib.rs +++ b/coordinator/substrate/src/lib.rs @@ -7,6 +7,10 @@ use std::collections::HashMap; use borsh::{BorshSerialize, BorshDeserialize}; +use blake2::{ + digest::{typenum::U32, Digest as _}, + Blake2b, +}; use dkg::Participant; use serai_client_serai::abi::{ @@ -80,6 +84,37 @@ impl NewSetInformation { self.participant_indexes.insert(*validator, these_is); } } + + /// Create a new [`NewSetInformation`]. + pub fn new( + set: ExternalValidatorSet, + serai_block: [u8; 32], + declaration_time: u64, + threshold: u16, + validators: Vec<(SeraiAddress, u16)>, + evrf_public_keys: Vec<([u8; 32], Vec)>, + ) -> Self { + let mut result = Self { + set, + serai_block, + declaration_time, + threshold, + validators, + evrf_public_keys, + participant_indexes: Default::default(), + participant_indexes_reverse_lookup: Default::default(), + }; + result.init_participant_indexes(); + result + } +} + +impl NewSetInformation { + /// The hash to use for the genesis of the corresponding Tributary. + pub fn tributary_genesis(&self) -> [u8; 32] { + // This MUST only hash data completely deterministic to the Substrate blockchain. + Blake2b::::digest(borsh::to_vec(self).unwrap()).into() + } } mod _public_db { diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 35d6907ae..01ed7b22c 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -620,11 +620,19 @@ pub struct ScanTributaryTask { impl ScanTributaryTask { /// Create a new instance of this task. + /// + /// This will panic if the Tributary read does not correspond to the set. pub fn new( tributary_db: TD, set: NewSetInformation, tributary: TributaryReader, ) -> Self { + assert_eq!( + set.tributary_genesis(), + tributary.genesis(), + "set information is inconsistent with the tributary" + ); + let mut validators = Vec::with_capacity(set.validators.len()); let mut total_weight = 0; let mut validator_weights = HashMap::with_capacity(set.validators.len()); diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 9ee93b953..10d1ebb60 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -273,31 +273,24 @@ pub(crate) fn assert_block_side_effects( assert_no_pending_messages(txn, set); } -pub(crate) fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { - let mut participant_indexes = HashMap::new(); - let mut reverse_lookup = HashMap::new(); - let mut i = 1u16; - for (address, weight) in validators { - let mut indices = Vec::new(); - for _ in 0 .. *weight { - let p = Participant::new(i).unwrap(); - indices.push(p); - reverse_lookup.insert(p, *address); - i += 1; - } - participant_indexes.insert(*address, indices); - } - - NewSetInformation { - set: random_validator_set(&mut OsRng), - serai_block: random_bytes(&mut OsRng), - declaration_time: OsRng.next_u64(), - threshold: OsRng.gen_range(0 ..= u16::MAX), - validators: validators.to_vec(), - evrf_public_keys: vec![], - participant_indexes, - participant_indexes_reverse_lookup: reverse_lookup, - } +fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { + let set = random_validator_set(&mut OsRng); + let serai_block_hash = random_bytes(&mut OsRng); + let serai_block_time = OsRng.next_u64(); + let threshold = u16::try_from( + ((usize::from(validators.iter().map(|(_validator, weight)| *weight).sum::()) * 2) / 3) + 1, + ) + .unwrap(); + let validators = validators.to_vec(); + let evrf_public_keys = vec![]; + NewSetInformation::new( + set, + serai_block_hash, + serai_block_time, + threshold, + validators, + evrf_public_keys, + ) } pub(crate) type ValidatorSetup = ( diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs index 6b764f442..34607cbff 100644 --- a/coordinator/tributary/src/tests/scan_tributary.rs +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -1,98 +1,106 @@ +use core::time::Duration; +use std::time::Instant; + use blake2::{Digest as _, Blake2s256}; -use serai_primitives::test_helpers::random_genesis; +use serai_primitives::test_helpers::random_bytes; use serai_task::test_helpers::TaskTest; use tributary_sdk::{ - Tributary, ReadWrite as _, Block, BlockHeader, Transaction as TributaryTransaction, Evidence, - tendermint::tx::TendermintTx, + ReadWrite as _, Evidence, tendermint::tx::TendermintTx, Transaction as TributaryTransaction, + BlockHeader, Block, Tributary, }; use super::*; -/// Create a Tributary with a single validator. Returns the Tributary (kept alive so -/// the Tendermint machine keeps running) and the validator's signing key. +/// Create a Tributary with a single validator. +/// +/// This returns the Tributary (kept alive so the Tendermint machine keeps running) and the +/// validator's signing key. async fn make_tributary( db: MemDb, -) -> (Tributary, Zeroizing<::F>, [u8; 32]) { - let key = random_key(&mut OsRng); - let pub_key = get_key_point(&key); - let genesis = random_genesis(&mut OsRng); - let tributary = Tributary::::new( + weights: &[u16], +) -> (NewSetInformation, Tributary) { + let mut key = None; + let mut validator_keys = vec![]; + let mut validators = vec![]; + for weight in weights.iter().copied() { + let this_key = random_key(&mut OsRng); + let pub_key = get_key_point(&this_key); + key = Some(this_key); + validator_keys.push((pub_key, u64::from(weight))); + let addr = SeraiAddress(pub_key.to_bytes()); + validators.push((addr, weight)); + } + let set_info = new_test_set_info(&validators); + let tributary = Tributary::::new( db, - genesis, + set_info.tributary_genesis(), // Use a past start_time so TendermintMachine::new doesn't sleep waiting for block end time 1, - key.clone(), - vec![(pub_key, 1)], - MockP2p, + key.unwrap(), + validator_keys, + NopP2p, ) .await - .expect("Tributary::new returned None"); - (tributary, key, genesis) + .expect("Tributary::new returned `None`?"); + (set_info, tributary) } #[tokio::test] async fn new_scan_tributary_task() { - let db = MemDb::new(); - let (tributary, _, _) = make_tributary(db.clone()).await; - // Single validator with weight > 1 { - let (_, addr) = random_serai_address_and_key(&mut OsRng); - let set_info = new_test_set_info(&[(addr, 3)]); - let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); + let db = MemDb::new(); + let (set_info, tributary) = make_tributary(db.clone(), &[3]).await; + + let task = + ScanTributaryTask::::new(db.clone(), set_info.clone(), tributary.reader()); + assert_eq!(task.set.set, set_info.set); assert_eq!(task.validators.len(), 1); - assert_eq!(task.validators[0], addr); + assert_eq!(task.validators[0], set_info.validators[0].0); assert_eq!(task.total_weight, 3); - assert_eq!(task.validator_weights[&addr], 3); + assert_eq!(task.validator_weights.len(), 1); + assert_eq!(task.validator_weights[&set_info.validators[0].0], 3); } // Multiple validators with different weights { - let (_, addr1) = random_serai_address_and_key(&mut OsRng); - let (_, addr2) = random_serai_address_and_key(&mut OsRng); - let (_, addr3) = random_serai_address_and_key(&mut OsRng); - let set_info = new_test_set_info(&[(addr1, 1), (addr2, 2), (addr3, 4)]); - let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); + let db = MemDb::new(); + let (set_info, tributary) = make_tributary(db.clone(), &[1, 2, 4]).await; + let task = + ScanTributaryTask::::new(db.clone(), set_info.clone(), tributary.reader()); + assert_eq!(task.set.set, set_info.set); assert_eq!(task.validators.len(), 3); assert_eq!(task.total_weight, 7); - assert_eq!(task.validator_weights[&addr1], 1); - assert_eq!(task.validator_weights[&addr2], 2); - assert_eq!(task.validator_weights[&addr3], 4); - } - - // Preserves set info - { - let (_, addr) = random_serai_address_and_key(&mut OsRng); - let set_info = new_test_set_info(&[(addr, 1)]); - let expected_set = set_info.set; - let task = ScanTributaryTask::::new(db.clone(), set_info, tributary.reader()); - - assert_eq!(task.set.set, expected_set); + assert_eq!(task.validator_weights.len(), 3); + assert_eq!(task.validator_weights[&set_info.validators[0].0], 1); + assert_eq!(task.validator_weights[&set_info.validators[1].0], 2); + assert_eq!(task.validator_weights[&set_info.validators[2].0], 4); } } /// Wait until `block_after(parent)` returns `Some`, with a 30s timeout. async fn wait_for_block_after( - tributary: &Tributary, + tributary: &Tributary, parent: &[u8; 32], ) -> [u8; 32] { let reader = tributary.reader(); - let start = std::time::Instant::now(); + let start = Instant::now(); loop { if let Some(hash) = reader.block_after(parent) { return hash; } assert!( - start.elapsed() <= std::time::Duration::from_secs(30), + start.elapsed() <= Duration::from_secs(30), "timed out waiting for a block after {parent:?}" ); - tokio::time::sleep(std::time::Duration::from_millis(20)).await; + tokio::time::sleep(Duration::from_millis(20)).await; } } /// Write a fake block into the DB so the TributaryReader can find it. +/// /// Returns the block's hash. fn inject_block( mut txn: impl DbTxn, @@ -125,43 +133,40 @@ fn inject_block( #[tokio::test(flavor = "multi_thread")] async fn scan_tributary_task_run_iteration() { - let (_, addr) = random_serai_address_and_key(&mut OsRng); - let set_info = new_test_set_info(&[(addr, 1)]); - // No blocks committed yet: returns false { let db = MemDb::new(); - let (tributary, _, _) = make_tributary(db.clone()).await; + let (set_info, tributary) = make_tributary(db.clone(), &[1]).await; - let mut task = - ScanTributaryTask::::new(db, set_info.clone(), tributary.reader()); + let mut task = ScanTributaryTask::::new(db, set_info, tributary.reader()); TaskTest::task_runs_once_and_matches_progress(&mut task, false).await; } - let mut db = MemDb::new(); - let (tributary, _, genesis) = make_tributary(db.clone()).await; + { + let mut db = MemDb::new(); + let (set_info, tributary) = make_tributary(db.clone(), &[1]).await; + let genesis = set_info.tributary_genesis(); - // Wait for at least one real committed block - wait_for_block_after(&tributary, &genesis).await; + // Wait for at least one real committed block + wait_for_block_after(&tributary, &genesis).await; - // Create one task that persists across the remaining steps so each run_iteration - // continues from where the previous one left off. - let mut task = - ScanTributaryTask::::new(db.clone(), set_info.clone(), tributary.reader()); + // Create one task that persists across the remaining steps so each run_iteration + // continues from where the previous one left off. + let mut task = + ScanTributaryTask::::new(db.clone(), set_info.clone(), tributary.reader()); - // Processes committed block(s) and records progress - TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; + // Processes committed block(s) and records progress + TaskTest::task_runs_once_and_matches_progress(&mut task, true).await; - let (last_handled_block_number, last_handled_block_hash) = - TributaryDb::last_handled_tributary_block(&db, set_info.set).unwrap(); - assert!(last_handled_block_number >= 1, "expected at least block 1 to be handled"); + let (last_handled_block_number, last_handled_block_hash) = + TributaryDb::last_handled_tributary_block(&db, set_info.set).unwrap(); + assert!(last_handled_block_number >= 1, "expected at least block 1 to be handled"); - // Processes block with provided and signed txs - inject after the actual last handled block - { + // Processes block with provided and signed txs - inject after the actual last handled block let batch_tx = TributaryTransaction::Application(Transaction::Batch { hash: random_bytes(&mut OsRng) }); let fake_evidence = TributaryTransaction::Tendermint(TendermintTx::SlashEvidence( - Evidence::InvalidPrecommit(make_signed_message_bytes(addr.0)), + Evidence::InvalidPrecommit(make_signed_message_bytes(set_info.validators[0].0 .0)), )); let block_txs = vec![fake_evidence, batch_tx]; @@ -188,19 +193,20 @@ async fn scan_tributary_task_run_iteration() { // Errors when locally provided txs are missing { - let mut db2 = MemDb::new(); - let (tributary2, _, genesis2) = make_tributary(db2.clone()).await; + let mut db = MemDb::new(); + let (set_info, tributary) = make_tributary(db.clone(), &[1]).await; + let genesis = set_info.tributary_genesis(); let cosign_tx = Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }; - tributary2.provide_transaction(cosign_tx).await.unwrap(); + tributary.provide_transaction(cosign_tx).await.unwrap(); // Wait for a block that includes the provided transaction - let reader = tributary2.reader(); - let mut parent = genesis2; - let start = std::time::Instant::now(); + let reader = tributary.reader(); + let mut parent = genesis; + let start = Instant::now(); loop { assert!( - start.elapsed() <= std::time::Duration::from_secs(30), + start.elapsed() <= Duration::from_secs(30), "timed out waiting for a block with the provided tx" ); if let Some(hash) = reader.block_after(&parent) { @@ -214,19 +220,18 @@ async fn scan_tributary_task_run_iteration() { } parent = hash; } else { - tokio::time::sleep(std::time::Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; } } // Delete the locally_provided_quantity to trigger the error let local_qty_key = - MemDb::key(b"tributary_provided", b"local_quantity", [genesis2.as_ref(), b"Cosign"].concat()); - let mut txn = db2.txn(); + MemDb::key(b"tributary_provided", b"local_quantity", [genesis.as_ref(), b"Cosign"].concat()); + let mut txn = db.txn(); txn.del(local_qty_key); txn.commit(); - let set_info = new_test_set_info(&[(addr, 1)]); - let mut task = ScanTributaryTask::::new(db2, set_info, reader); + let mut task = ScanTributaryTask::::new(db, set_info, reader); TaskTest::task_runs_and_fails_with(&mut task, "didn't have the provided Transactions").await; } } From 747bf454ce908ec3f5bb243645fdad584981f98e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Apr 2026 12:18:37 -0400 Subject: [PATCH 69/71] Tweak some `scan_block` tests One has an unclear story. Others have a pattern of checking a single message (less than the threshold) causes no events, but doesn't check all messages less than the threshold cause no events. --- coordinator/tributary/src/tests/mod.rs | 1 + coordinator/tributary/src/tests/scan_block.rs | 70 +++++++++---------- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 10d1ebb60..e5720ed1b 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -239,6 +239,7 @@ pub(crate) fn assert_block_side_effects( transactions: &[tributary_sdk::Transaction], ) { for tx in transactions { + // TODO: Expand from checking the message is `Some(_)` to the exact expected message match tx { tributary_sdk::Transaction::Application(app_tx) => match app_tx { Transaction::DkgParticipation { .. } => { diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index b615a7f47..67b3dbbab 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -401,25 +401,9 @@ mod handle_application_tx { let mut txn = db.txn(); let block_number = OsRng.next_u64(); - // Below threshold: no DkgConfirmationMessages sent { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - - scan_block.handle_application_tx( - block_number, - Transaction::DkgConfirmationPreprocess { - attempt: 0, - preprocess: random_bytes(&mut OsRng), - signed: new_signed(key0), - }, - ); - } - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); - - // Threshold crossed: sends DkgConfirmationMessages (Preprocesses) - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for key in [key1, key2] { + for (i, key) in [key0, key1, key2].into_iter().enumerate() { scan_block.handle_application_tx( block_number, Transaction::DkgConfirmationPreprocess { @@ -428,8 +412,14 @@ mod handle_application_tx { signed: new_signed(key), }, ); + if i != 2 { + // Below threshold: no DkgConfirmationMessages sent + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + } } } + // Threshold crossed: sends DkgConfirmationMessages (Preprocesses) + // TODO: Check the received message is the expected one assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_some()); } @@ -471,7 +461,7 @@ mod handle_application_tx { // All 3 validators submit preprocesses (threshold crossed -> DkgConfirmationMessages sent) { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for key in [key0, key1, key2] { + for (i, key) in [key0, key1, key2].into_iter().enumerate() { scan_block.handle_application_tx( block_number, Transaction::DkgConfirmationPreprocess { @@ -480,34 +470,21 @@ mod handle_application_tx { signed: new_signed(key), }, ); + if i != 2 { + assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + } } } + // TODO: Check the exact message received assert!( DkgConfirmationMessages::try_recv(&mut txn, set).is_some(), "preprocesses crossing threshold should produce DkgConfirmationMessages" ); - // Below threshold: no DkgConfirmationMessages sent - { - let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - scan_block.handle_application_tx( - block_number, - Transaction::DkgConfirmationShare { - attempt: 0, - share: random_bytes(&mut OsRng), - signed: new_signed(key0), - }, - ); - } - assert!( - DkgConfirmationMessages::try_recv(&mut txn, set).is_none(), - "single share should not produce DkgConfirmationMessages" - ); - // Threshold crossed: sends DkgConfirmationMessages (Shares) { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, total_weight, &weights); - for key in [key1, key2] { + for (i, key) in [key0, key1, key2].into_iter().enumerate() { scan_block.handle_application_tx( block_number, Transaction::DkgConfirmationShare { @@ -516,8 +493,15 @@ mod handle_application_tx { signed: new_signed(key), }, ); + if i != 2 { + assert!( + DkgConfirmationMessages::try_recv(&mut txn, set).is_none(), + "less than threshold should not produce DkgConfirmationMessages" + ); + } } } + // TODO: Check the exact message received assert!( DkgConfirmationMessages::try_recv(&mut txn, set).is_some(), "shares crossing threshold should produce DkgConfirmationMessages" @@ -635,6 +619,20 @@ mod handle_application_tx { } // Does not finish active cosign when block doesn't match + /* + TODO: The story for this test is unclear. + + The intent is that if we are to cosign block #500, then block #501, we don't interrupt + cosigning block #500 to begin on block #501. Instead, we finish #500, by which point we may + be asked to cosign block #501, or maybe even #502. The intent is by finishing #500, we + inherently begin the latest block to cosign. + + This test asserts that if we're cosigning X, but then finish Y (which should be an + unreachable invariant, as we shouldn't start cosinging while already cosigning), that we + continue on X. Presumably, this is a byproduct of how if we finish #500 but have #501 + pending, we're intended to immediately rollover to #501, presented here as explicit + functionality to test for. This has to be straightened out. + */ { let mut db = MemDb::new(); let active_hash = random_block_hash(&mut OsRng); From 1d6270410cf126519e11afff449da4d541c8e2c2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Apr 2026 13:31:32 -0400 Subject: [PATCH 70/71] Note other incongruent test --- coordinator/tributary/src/tests/db.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 8a47e419a..0eacfabd8 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -981,6 +981,15 @@ mod tributary_db { /// After threshold without a reattempt topic, Accumulated entries are /// cleaned up. The duplicate call does not hit the assertion (key is gone) /// and instead falls through to the weight >= threshold NOP. + /* + TODO: This test is unclear. + + It should test an unreachable case (double accumulate), which is why that is allowed to + generally panic. This test shows the literal behavior where if the topic's data is pruned, + then those asserts for an unreachable case disappear, which is fine. Why are we testing + this behavior though? It should be unreachable and unobservable. This is more akin to a bug + report that sanity checks disappear than functionality we want to assert the behavior of. + */ #[test] fn double_call_after_threshold_without_reattempt_is_nop() { // RemoveParticipant has no reattempt, so entries are cleaned up post-threshold From c637c9b9c39143e8bc999c1db416fce4a1b3c586 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Apr 2026 14:24:31 -0400 Subject: [PATCH 71/71] Nits Some of these fix behavior which was updated with prior commits, but for which all the tests/cases had yet to be updated. --- Cargo.lock | 1 - coordinator/Cargo.toml | 1 - coordinator/tributary/Cargo.toml | 4 +- coordinator/tributary/src/db.rs | 55 ++--- coordinator/tributary/src/lib.rs | 4 +- coordinator/tributary/src/tests/db.rs | 146 +++++------- coordinator/tributary/src/tests/mod.rs | 137 +++++------ coordinator/tributary/src/tests/scan_block.rs | 220 ++++++++++-------- .../tributary/src/tests/scan_tributary.rs | 2 +- .../tributary/src/tests/transaction.rs | 85 ++++--- coordinator/tributary/src/tests/tributary.rs | 47 ++-- coordinator/tributary/src/transaction.rs | 6 +- 12 files changed, 353 insertions(+), 355 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3569a0585..7d8b6c247 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7748,7 +7748,6 @@ name = "serai-coordinator" version = "0.1.0" dependencies = [ "bitvec", - "blake2 0.11.0-rc.6", "borsh", "ciphersuite 0.4.2", "dalek-ff-group", diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index a495df1fe..edc269735 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -21,7 +21,6 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] } bitvec = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } -blake2 = { version = "0.11.0-rc.5", default-features = false, features = ["alloc"] } schnorrkel = { version = "0.11", default-features = false, features = ["std"] } dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index 805e61381..ad97a3a09 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -43,12 +43,14 @@ messages = { package = "serai-processor-messages", path = "../../processor/messa serai-env = { path = "../../common/env", version = "0.1.0" } [dev-dependencies] -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } rand = { version = "0.8", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } + tendermint = { package = "tendermint-machine", path = "../tributary-sdk/tendermint" } tributary-sdk = { path = "../tributary-sdk", features = ["tests"] } + tokio = { version = "1", default-features = false, features = ["rt", "time", "macros", "rt-multi-thread"] } + serai-task = { path = "../../common/task", features = ["test-helpers"] } serai-substrate-tests = { path = "../../tests/substrate" } diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs index f2078201c..fa9a203d3 100644 --- a/coordinator/tributary/src/db.rs +++ b/coordinator/tributary/src/db.rs @@ -2,13 +2,15 @@ use std::collections::HashMap; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_primitives::{BlockHash, validator_sets::ExternalValidatorSet, address::SeraiAddress}; - -use messages::sign::{VariantSignId, SignId}; +use serai_primitives::{ + BlockHash, + validator_sets::{KeyShares, ExternalValidatorSet}, + address::SeraiAddress, +}; use serai_db::*; - use serai_cosign_types::CosignIntent; +use messages::sign::{VariantSignId, SignId}; use crate::transaction::SigningProtocolRound; @@ -45,7 +47,7 @@ pub enum Topic { }, } -#[derive(Debug, PartialEq)] +#[derive(PartialEq, Eq, Debug)] pub(crate) enum Participating { Participated, Everyone, @@ -353,8 +355,10 @@ impl TributaryDb { ); } pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) { - ActivelyCosigning::take(txn, set) - .expect("tried to finish cosigning but wasn't actively cosigning"); + assert!( + ActivelyCosigning::take(txn, set).is_some(), + "tried to finish cosigning but wasn't actively cosigning" + ); } pub(crate) fn mark_cosigned( txn: &mut impl DbTxn, @@ -371,13 +375,6 @@ impl TributaryDb { Cosigned::get(txn, set, substrate_block_hash).is_some() } - /// The next topic requiring recognition which has been recognized by this Tributary. - pub fn try_recv_topic_requiring_recognition( - txn: &mut impl DbTxn, - set: ExternalValidatorSet, - ) -> Option { - RecognizedTopics::try_recv(txn, set) - } pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ExternalValidatorSet, topic: Topic) { AccumulatedWeight::set(txn, set, topic, &0); RecognizedTopics::send(txn, set, &topic); @@ -385,6 +382,13 @@ impl TributaryDb { pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { AccumulatedWeight::get(getter, set, topic).is_some() } + /// The next topic which required recognition which has now been recognized by this Tributary. + pub(crate) fn try_recv_topic_requiring_recognition( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + RecognizedTopics::try_recv(txn, set) + } pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) { for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) { @@ -448,8 +452,7 @@ impl TributaryDb { // nonces on transactions (deterministically to the topic) assert!( txn.get(Accumulated::::key(set, topic, validator)).is_none(), - "accumulate called twice for the same (validator, topic) tuple: \ - the nonce system should have prevented this" + "accumulate called twice for the same (validator, topic) tuple", ); let accumulated_weight = AccumulatedWeight::get(txn, set, topic); @@ -497,9 +500,11 @@ impl TributaryDb { } // Accumulate the data - accumulated_weight = accumulated_weight.checked_add(validator_weight).unwrap_or_else(|| { - panic!("accumulated {accumulated_weight} overflowed adding validator's {validator_weight}") - }); + const { + // If this is true, the following addition won't trip unless we're accumulating past the max + assert!(KeyShares::MAX_PER_SET < u16::MAX); + } + accumulated_weight += validator_weight; AccumulatedWeight::set(txn, set, topic, &accumulated_weight); Accumulated::set(txn, set, topic, validator, data); @@ -508,14 +513,10 @@ impl TributaryDb { // Queue this for re-attempt after enough time passes let reattempt_topic = topic.reattempt_topic(); if let Some((attempt, reattempt_topic)) = reattempt_topic { - // Linearly scale the time for the protocol with the attempt number - let blocks_till_reattempt = u64::from(attempt) * u64::from(BASE_REATTEMPT_DELAY); - - let recognize_at = block_number.checked_add(blocks_till_reattempt).unwrap_or_else(|| { - panic!( - "recognize_at overflowed: block_number {block_number} + delay {blocks_till_reattempt}", - ); - }); + // Linearly scale the time for the protocol with the attempt number, up to 10x + let blocks_till_reattempt = attempt.min(10).saturating_mul(u64::from(BASE_REATTEMPT_DELAY)); + + let recognize_at = block_number + blocks_till_reattempt; let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1)); queued.push(reattempt_topic); Reattempt::set(txn, set, recognize_at, &queued); diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 01ed7b22c..436d893a2 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -195,7 +195,7 @@ impl ScanBlock<'_, TD, TDT, P> { ) -> Option<(SignId, HashMap>)> { assert!( matches!(topic, Topic::DkgConfirmation { .. }), - "accumulate_dkg_confirmation called with non-DkgConfirmation topic: {topic:?}" + "`accumulate_dkg_confirmation` called with non-`DkgConfirmation` topic: {topic:?}" ); match TributaryDb::accumulate::( self.tributary_txn, @@ -712,7 +712,7 @@ impl ContinuallyRan for ScanTributaryTask { } } -/// Create the Transaction::SlashReport to publish per the local view. +/// Create the `Transaction::SlashReport` to publish per the local view. pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction { let mut slash_points = Vec::with_capacity(set.validators.len()); for (validator, _weight) in set.validators.iter().copied() { diff --git a/coordinator/tributary/src/tests/db.rs b/coordinator/tributary/src/tests/db.rs index 0eacfabd8..98a28e629 100644 --- a/coordinator/tributary/src/tests/db.rs +++ b/coordinator/tributary/src/tests/db.rs @@ -1,22 +1,22 @@ use rand::{Rng as _, RngCore as _, rngs::OsRng}; -use messages::sign::{SignId, VariantSignId}; -use serai_db::{Db as _, DbTxn, MemDb}; use serai_primitives::{ address::SeraiAddress, - validator_sets::ExternalValidatorSet, + validator_sets::{ExternalValidatorSet, KeyShares}, test_helpers::{ random_bytes, random_block_hash, random_serai_address, random_validator_set, random_vec_u8, }, }; +use messages::sign::{SignId, VariantSignId}; +use serai_db::{Db as _, DbTxn, MemDb}; use crate::{ + transaction::SigningProtocolRound, db::{*, ProcessorMessages, DkgConfirmationMessages}, tests::*, - transaction::{RoundPayloads, Preprocess, Share, SigningProtocolRound}, }; -/// One of each topic kind, and attempts: at 0, a random attempt, and u64::MAX. +/// One of each topic kind, and attempts: at 0 and a random attempt. fn all_topics_and_attempts() -> Vec { let random_attempt = OsRng.gen_range(1u64 .. u64::MAX); vec![ @@ -25,11 +25,9 @@ fn all_topics_and_attempts() -> Vec { // DkgConfirmation Preprocess Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }, Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Preprocess }, - Topic::DkgConfirmation { attempt: u64::MAX, round: SigningProtocolRound::Preprocess }, // DkgConfirmation Share Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Share }, Topic::DkgConfirmation { attempt: random_attempt, round: SigningProtocolRound::Share }, - Topic::DkgConfirmation { attempt: u64::MAX, round: SigningProtocolRound::Share }, // SlashReport Topic::SlashReport, // Sign Preprocess @@ -43,11 +41,6 @@ fn all_topics_and_attempts() -> Vec { attempt: random_attempt, round: SigningProtocolRound::Preprocess, }, - Topic::Sign { - id: random_variant_sign_id(), - attempt: u64::MAX, - round: SigningProtocolRound::Preprocess, - }, // Sign Share Topic::Sign { id: random_variant_sign_id(), attempt: 0, round: SigningProtocolRound::Share }, Topic::Sign { @@ -55,15 +48,10 @@ fn all_topics_and_attempts() -> Vec { attempt: random_attempt, round: SigningProtocolRound::Share, }, - Topic::Sign { - id: random_variant_sign_id(), - attempt: u64::MAX, - round: SigningProtocolRound::Share, - }, ] } -/// Share-round topics only, with attempts: at 0, random, and u64::MAX. +/// Share-round topics only, with attempts: at 0 and random. fn all_share_topics_and_attempts() -> Vec { all_topics_and_attempts() .into_iter() @@ -77,7 +65,7 @@ fn all_share_topics_and_attempts() -> Vec { .collect() } -/// Preprocess-round topics only, with attempts: at 0, random, and u64::MAX. +/// Preprocess-round topics only, with attempts: at 0 and random. fn all_preprocess_topics_and_attempts() -> Vec { all_topics_and_attempts() .into_iter() @@ -123,7 +111,7 @@ where 1, &data, ); - if let Some(ref mut f) = on_each { + if let Some(f) = &mut on_each { f(i, &result); } } @@ -180,21 +168,23 @@ mod topic { Topic::DkgConfirmation { attempt, round } => match round { SigningProtocolRound::Preprocess => assert_eq!( topic.reattempt_topic(), - attempt.checked_add(1).map(|next| { - ( - next, - Topic::DkgConfirmation { attempt: next, round: SigningProtocolRound::Preprocess }, - ) - }) + Some(( + attempt + 1, + Topic::DkgConfirmation { + attempt: attempt + 1, + round: SigningProtocolRound::Preprocess + }, + )) ), SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, Topic::Sign { id, attempt, round } => match round { SigningProtocolRound::Preprocess => assert_eq!( topic.reattempt_topic(), - attempt.checked_add(1).map(|next| { - (next, Topic::Sign { id, attempt: next, round: SigningProtocolRound::Preprocess }) - }) + Some(( + attempt + 1, + Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess } + )) ), SigningProtocolRound::Share => assert_eq!(topic.reattempt_topic(), None), }, @@ -338,14 +328,13 @@ mod tributary_db { let block_hash1 = random_block_hash(&mut OsRng); let block_number1 = OsRng.next_u64(); - let expected_topic = - expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number1)); + let expected_topic = initial_sign_topic(VariantSignId::Cosign(block_number1)); // Recognizes topic { let mut txn = db.txn(); TributaryDb::start_cosigning(&mut txn, set, block_hash1, block_number1); - assert_cosigning_invariants(&mut txn, set, block_hash1, block_number1); + assert_start_cosigning_invariants(&mut txn, set, block_hash1, block_number1); txn.commit(); } @@ -364,8 +353,6 @@ mod tributary_db { // Previous topic still recognized assert!(TributaryDb::recognized(&txn, set, expected_topic)); - - txn.commit(); } // Finish cosigning @@ -396,7 +383,7 @@ mod tributary_db { assert!(TributaryDb::recognized( &txn, set, - expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number2)) + initial_sign_topic(VariantSignId::Cosign(block_number2)) )); // Previous topic also remains recognized assert!(TributaryDb::recognized(&txn, set, expected_topic)); @@ -407,7 +394,7 @@ mod tributary_db { #[test] fn start_of_block() { - let _ = env_logger::try_init(); + serai_env::init_logger(); let set = random_validator_set(&mut OsRng); let reattemptable_topics: Vec = all_topics_and_attempts() @@ -886,6 +873,7 @@ mod tributary_db { txn.commit(); } + assert!(TributaryDb::recognized(&db, set, succeeding)); assert_eq!( AccumulatedWeight::get(&db, set, succeeding), Some(0), @@ -945,6 +933,7 @@ mod tributary_db { fn double_call_after_threshold_with_reattempt_panics() { // DkgConfirmation Preprocess has a reattempt topic, so entries survive post-threshold let topic = Topic::DkgConfirmation { attempt: 0, round: SigningProtocolRound::Preprocess }; + assert!(topic.reattempt_topic().is_some()); let (set, validator, validators, total_weight, validator_weight) = default_accumulate_setup(); let mut db = MemDb::new(); @@ -1079,7 +1068,7 @@ mod tributary_db { let weight_before = pre_weight.unwrap_or(0); // Slash for participating without completing the preceding topic. - if topic.preceding_topic().is_some() && !has_preceding_accumulated { + if topic.preceding_topic().is_some() && (!has_preceding_accumulated) { assert!(post_slashed, "should be fatally slashed for missing preceding participation"); assert!(matches!(result, DataSet::None)); assert_eq!(post_weight, pre_weight, "weight unchanged after preceding slash"); @@ -1122,6 +1111,7 @@ mod tributary_db { // Reattempt should be queued if topic is reattemptable. if let Some((reattempt_attempt, reattempt_topic)) = topic.reattempt_topic() { let blocks_till = reattempt_attempt + .min(10) .checked_mul(u64::from(BASE_REATTEMPT_DELAY)) .expect("reattempt delay overflowed u64"); let recognize_at = @@ -1218,11 +1208,11 @@ mod tributary_db { fn fuzz_accumulate() { for _ in 0 .. 1000 { let has_initial_weight = OsRng.gen::(); - let initial_weight = OsRng.gen_range(0u16 .. u16::MAX); - let total_weight = OsRng.gen_range(1u16 .. u16::MAX); + let initial_weight = OsRng.gen_range(0u16 .. KeyShares::MAX_PER_SET); + let total_weight = OsRng.gen_range(1u16 .. KeyShares::MAX_PER_SET); let has_next_topic_weight = OsRng.gen::(); - let next_topic_initial_weight = OsRng.gen_range(0u16 .. u16::MAX); + let next_topic_initial_weight = OsRng.gen_range(0u16 .. KeyShares::MAX_PER_SET); let has_preceding_topic_accumulated = OsRng.gen::(); @@ -1235,12 +1225,12 @@ mod tributary_db { }; let cosign_block = OsRng.next_u64(); let batch_id: [u8; 32] = OsRng.gen(); - let validator_weight = OsRng.gen_range(1u16 .. u16::MAX); + let validator_weight = OsRng.gen_range(1u16 .. KeyShares::MAX_PER_SET); let block_number = OsRng.gen_range(1u64 .. u64::MAX); let data: Vec = (0 .. OsRng.gen_range(0usize .. 64)).map(|_| OsRng.gen()).collect(); - let num_validators = OsRng.gen_range(1u16 .. u16::MAX); - let cur_validator = OsRng.gen_range(0u16 .. u16::MAX); + let num_validators = OsRng.gen_range(1u16 .. u16::from(u8::MAX)); + let cur_validator = OsRng.gen_range(0u16 .. u16::from(u8::MAX)); let validator_in_list = OsRng.gen::(); let topic = match topic_variant % 5 { @@ -1293,50 +1283,36 @@ mod tributary_db { let pre_weight = AccumulatedWeight::get(&txn, set, topic); let pre_slashed = TributaryDb::is_fatally_slashed(&txn, set, validator); - let catch_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - let result = TributaryDb::accumulate::>( - &mut txn, - set, - &validators, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - ); + let result = TributaryDb::accumulate::>( + &mut txn, + set, + &validators, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + ); - txn.commit(); + txn.commit(); - verify_accumulate_invariants( - &db_clone, - set, - total_weight, - block_number, - topic, - validator, - validator_weight, - &data, - pre_weight, - pre_slashed, - has_preceding_topic_accumulated, - has_next_topic_weight, - validator_in_list, - &result, - ); - })); - - if let Err(panic) = catch_result { - let msg = panic - .downcast_ref::() - .map(String::as_str) - .or_else(|| panic.downcast_ref::<&str>().copied()) - .unwrap_or(""); - if msg.contains("overflowed") { - continue; - } - std::panic::resume_unwind(panic); - } + verify_accumulate_invariants( + &db_clone, + set, + total_weight, + block_number, + topic, + validator, + validator_weight, + &data, + pre_weight, + pre_slashed, + has_preceding_topic_accumulated, + has_next_topic_weight, + validator_in_list, + &result, + ); } } } diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index e5720ed1b..7e742eb68 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,71 +1,66 @@ +use core::future::Future; use std::collections::HashMap; -use ciphersuite::group::GroupEncoding as _; -use ciphersuite::WrappedGroup; -use dalek_ff_group::{Ristretto, RistrettoPoint}; -use messages::sign::VariantSignId; +use zeroize::Zeroizing; +use rand::{RngCore, CryptoRng, Rng, rngs::OsRng}; -use rand::{CryptoRng, Rng, RngCore}; -use rand_core::OsRng; +use ciphersuite::{group::GroupEncoding as _, WrappedGroup}; +use dalek_ff_group::Ristretto; use serai_primitives::{ address::SeraiAddress, + validator_sets::ExternalValidatorSet, test_helpers::{ random_bytes, random_block_hash, random_serai_address, random_vec_u8, random_validator_set, }, }; -use tributary_sdk::{P2p, tendermint::TendermintBlock}; use tendermint::{ SignedMessage, Message, Data, ext::{BlockNumber, RoundNumber}, }; -use zeroize::Zeroizing; -use dkg::Participant; +use tributary_sdk::{P2p, tendermint::TendermintBlock}; + +use messages::sign::VariantSignId; use serai_coordinator_substrate::NewSetInformation; use crate::*; -pub mod transaction; -pub mod db; -pub mod scan_block; -pub mod scan_tributary; -pub mod tributary; +mod transaction; +mod db; +mod scan_block; +mod scan_tributary; +mod tributary; +/// A P2P implementation which is a NOP and does nothing. #[derive(Clone)] -struct MockP2p; -impl P2p for MockP2p { - fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + core::future::Future { +struct NopP2p; +impl P2p for NopP2p { + fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + Future { async {} } } -pub(crate) fn random_key( - rng: &mut R, -) -> Zeroizing<::F> { +fn random_key(rng: &mut R) -> Zeroizing<::F> { Zeroizing::new(::F::random(&mut *rng)) } -pub(crate) fn get_key_point(key: &Zeroizing<::F>) -> RistrettoPoint { - Ristretto::generator() * **key -} - -pub(crate) fn random_serai_address_and_key( +fn random_serai_address_and_key( rng: &mut R, -) -> (RistrettoPoint, SeraiAddress) { - let key = get_key_point(&random_key(rng)); +) -> (::G, SeraiAddress) { + let key = Ristretto::generator() * *random_key(rng); (key, SeraiAddress(key.to_bytes())) } -pub(crate) fn random_signed(rng: &mut R) -> Signed { +fn random_signed(rng: &mut R) -> Signed { let signed = tributary_sdk::tests::random_signed(&mut *rng); Signed { signer: signed.signer, signature: signed.signature } } -/// One of each signed transaction kind, and attempts: at 0, a random attempt, and u64::MAX. -pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec { - let random_attempt = OsRng.gen_range(1u64 .. u64::MAX); - let signed = *signed; +/// One of each signed transaction kind, and attempts: at 0 and a random attempt. +#[expect(clippy::large_types_passed_by_value)] +fn all_signed_transactions_and_attempts(signed: Signed) -> Vec { + let random_attempt = OsRng.next_u64().saturating_add(1); vec![ // RemoveParticipant Transaction::RemoveParticipant { participant: random_serai_address(&mut OsRng), signed }, @@ -82,11 +77,6 @@ pub(crate) fn all_signed_transactions_and_attempts(signed: &Signed) -> Vec Vec Vec Vec Vec { +fn all_provided_transactions() -> Vec { vec![ Transaction::Cosign { substrate_block_hash: random_block_hash(&mut OsRng) }, Transaction::Cosigned { substrate_block_hash: random_block_hash(&mut OsRng) }, @@ -159,48 +130,45 @@ pub(crate) fn all_provided_transactions() -> Vec { } /// One of each of all transaction kinds. -pub(crate) fn all_transactions() -> Vec { - let mut txs = all_signed_transactions_and_attempts(&random_signed(&mut OsRng)); +fn all_transactions() -> Vec { + let mut txs = all_signed_transactions_and_attempts(random_signed(&mut OsRng)); txs.extend(all_provided_transactions()); txs } /// Assert that no messages remain in either the processor or DKG confirmation queues. -pub(crate) fn assert_no_pending_messages( - txn: &mut impl serai_db::DbTxn, - set: serai_primitives::validator_sets::ExternalValidatorSet, -) { +fn assert_no_pending_messages(txn: &mut impl serai_db::DbTxn, set: ExternalValidatorSet) { assert!( crate::ProcessorMessages::try_recv(txn, set).is_none(), - "unexpected remaining ProcessorMessage", + "unexpected remaining `ProcessorMessages`", ); assert!( crate::DkgConfirmationMessages::try_recv(txn, set).is_none(), - "unexpected remaining DkgConfirmationMessage", + "unexpected remaining `DkgConfirmationMessages`", ); } -pub(crate) fn random_transaction_id() -> VariantSignId { - VariantSignId::Transaction(random_bytes_32(&mut OsRng)) +fn random_variant_sign_id() -> VariantSignId { + // TODO: Randomly select a variant + VariantSignId::Transaction(random_bytes(&mut OsRng)) } -/// The expected topic to be recognized after start_cosigning runs. -pub(crate) fn expected_initially_recognized_sign_topic(id: VariantSignId) -> Topic { +/// The topic for a sign protocol for a just-recognized ID. +fn initial_sign_topic(id: VariantSignId) -> Topic { Topic::Sign { id, attempt: 0, round: SigningProtocolRound::Preprocess } } /// Assert the DB invariants established by `TributaryDb::start_cosigning`: /// - `ActivelyCosigning` is set to the given block hash. -/// - The cosign topic is recognized (AccumulatedWeight initialized). -/// - The cosign topic was queued for recognition (RecognizedTopics). -pub(crate) fn assert_cosigning_invariants( +/// - The cosign topic is recognized (`AccumulatedWeight` initialized). +/// - The cosign topic was queued for recognition (`RecognizedTopics`). +fn assert_start_cosigning_invariants( txn: &mut impl serai_db::DbTxn, - set: serai_primitives::validator_sets::ExternalValidatorSet, + set: ExternalValidatorSet, block_hash: serai_primitives::BlockHash, block_number: u64, ) { - let expected_topic = - expected_initially_recognized_sign_topic(VariantSignId::Cosign(block_number)); + let expected_topic = initial_sign_topic(VariantSignId::Cosign(block_number)); assert_eq!( ActivelyCosigning::get(txn, set), @@ -218,8 +186,8 @@ pub(crate) fn assert_cosigning_invariants( ); } -/// Construct a borsh-encoded `SignedMessage` for `TendermintNetwork`. -pub(crate) fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { +/// Construct a `borsh`-encoded `SignedMessage` for our `TendermintNetwork`. +fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { let msg = Message::<[u8; 32], TendermintBlock, [u8; 64]> { sender, block: BlockNumber(0), @@ -233,9 +201,9 @@ pub(crate) fn make_signed_message_bytes(sender: [u8; 32]) -> Vec { /// /// Some transactions produce messages on first submission (DkgParticipation, Cosign, SlashReport). /// This function drains those expected messages before calling `assert_no_pending_messages`. -pub(crate) fn assert_block_side_effects( +fn assert_block_side_effects( txn: &mut impl serai_db::DbTxn, - set: serai_primitives::validator_sets::ExternalValidatorSet, + set: ExternalValidatorSet, transactions: &[tributary_sdk::Transaction], ) { for tx in transactions { @@ -260,6 +228,7 @@ pub(crate) fn assert_block_side_effects( "SlashReport topic should be recognized", ); } + // TODO: Some of these will cause effects, but only conditionally Transaction::RemoveParticipant { .. } | Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } | @@ -294,8 +263,8 @@ fn new_test_set_info(validators: &[(SeraiAddress, u16)]) -> NewSetInformation { ) } -pub(crate) type ValidatorSetup = ( - Vec<(RistrettoPoint, SeraiAddress)>, +type Setup = ( + Vec<(::G, SeraiAddress)>, Vec<(SeraiAddress, u16)>, Vec, HashMap, @@ -303,8 +272,8 @@ pub(crate) type ValidatorSetup = ( ); /// Generate `n` random validators (weight 1 each) with keys, returning all derived collections. -pub(crate) fn setup_n_validators_with_keys(n: u16) -> ValidatorSetup { - let keys_addrs: Vec<(RistrettoPoint, SeraiAddress)> = +fn setup_n_validators_with_keys(n: u16) -> Setup { + let keys_addrs: Vec<(::G, SeraiAddress)> = (0 .. n).map(|_| random_serai_address_and_key(&mut OsRng)).collect(); let validator_data: Vec<(SeraiAddress, u16)> = keys_addrs.iter().map(|(_, addr)| (*addr, 1u16)).collect(); @@ -316,6 +285,6 @@ pub(crate) fn setup_n_validators_with_keys(n: u16) -> ValidatorSetup { } /// Common test setup with 3 random validators each with weight 1, total_weight = 3. -pub(crate) fn setup_test_validators_and_weights_with_keys() -> ValidatorSetup { +fn setup_test_validators_and_weights_with_keys() -> Setup { setup_n_validators_with_keys(3) } diff --git a/coordinator/tributary/src/tests/scan_block.rs b/coordinator/tributary/src/tests/scan_block.rs index 67b3dbbab..dc6575745 100644 --- a/coordinator/tributary/src/tests/scan_block.rs +++ b/coordinator/tributary/src/tests/scan_block.rs @@ -3,14 +3,14 @@ use core::marker::PhantomData; use schnorr::SchnorrSignature; use serai_primitives::test_helpers::{random_block_hash, random_vec_u8}; -use serai_cosign_types::CosignIntent; use serai_db::{Db as _, DbTxn, MemDb}; use tributary_sdk::{ - Block, BlockHeader, Transaction as TributaryTransaction, Evidence, tendermint::tx::TendermintTx, + tendermint::tx::TendermintTx, Evidence, Transaction as TributaryTransaction, BlockHeader, Block, }; -use crate::{*, db::CosignIntents as DbCosignIntents}; +use serai_cosign_types::CosignIntent; +use crate::{db::CosignIntents as DbCosignIntents, *}; use super::*; fn new_scan_block<'a, TDT: DbTxn>( @@ -19,7 +19,7 @@ fn new_scan_block<'a, TDT: DbTxn>( validators: &'a [SeraiAddress], total_weight: u16, validator_weights: &'a HashMap, -) -> ScanBlock<'a, MemDb, TDT, MockP2p> { +) -> ScanBlock<'a, MemDb, TDT, NopP2p> { ScanBlock { _td: PhantomData, _p2p: PhantomData, @@ -32,7 +32,7 @@ fn new_scan_block<'a, TDT: DbTxn>( } /// Create a Signed with the given signer key and a random signature. -fn new_signed(signer: RistrettoPoint) -> Signed { +fn random_signed_for_key(signer: ::G) -> Signed { Signed { signer, signature: SchnorrSignature { @@ -126,7 +126,7 @@ fn potentially_start_cosign() { scan_block.potentially_start_cosign(); } - assert_cosigning_invariants(&mut txn, set, block_hash, intent.block_number); + assert_start_cosigning_invariants(&mut txn, set, block_hash, intent.block_number); assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } @@ -233,6 +233,7 @@ fn accumulate_dkg_confirmation() { // Past threshold: further accumulations from a new validator are nops { // Add a 4th validator so we have a fresh signer after threshold is crossed. + // TODO: The set should have 4 validators from the start do we don't have a conflict here let v4 = random_serai_address(&mut OsRng); let mut validator_data_4 = validator_data.clone(); validator_data_4.push((v4, 1)); @@ -259,10 +260,10 @@ mod handle_application_tx { #[test] fn dont_handle_signed_kind_from_fatally_slashed() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); let mut db = MemDb::new(); @@ -270,10 +271,11 @@ mod handle_application_tx { { let mut txn = db.txn(); TributaryDb::fatal_slash(&mut txn, set, default_signer, "test reason"); + assert!(TributaryDb::is_fatally_slashed(&txn, set, default_signer)); txn.commit(); } - for tx in all_signed_transactions_and_attempts(&Signed::default()) { + for tx in all_signed_transactions_and_attempts(Signed::default()) { let mut txn = db.txn(); { @@ -291,10 +293,10 @@ mod handle_application_tx { #[test] fn remove_participant() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let default_signer = SeraiAddress(Signed::default().signer().to_bytes()); // The signer is fatally slashed if the participant voted to be removed is nonexistent @@ -317,6 +319,7 @@ mod handle_application_tx { { let (keys_addrs, validator_data, validators, weights, _) = setup_n_validators_with_keys(3); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (key0, addr0) = keys_addrs[0]; let (key1, _) = keys_addrs[1]; let (key2, _) = keys_addrs[2]; @@ -332,7 +335,10 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 3, &weights); scan_block.handle_application_tx( block_number, - Transaction::RemoveParticipant { participant: target, signed: new_signed(key0) }, + Transaction::RemoveParticipant { + participant: target, + signed: random_signed_for_key(key0), + }, ); } assert!( @@ -349,11 +355,21 @@ mod handle_application_tx { let mut scan_block = new_scan_block(&mut txn, &set_info, &validators, 3, &weights); scan_block.handle_application_tx( block_number, - Transaction::RemoveParticipant { participant: target, signed: new_signed(key1) }, + Transaction::RemoveParticipant { + participant: target, + signed: random_signed_for_key(key1), + }, + ); + assert!( + !TributaryDb::is_fatally_slashed(scan_block.tributary_txn, set, target), + "target should not be fatally slashed after two votes" ); scan_block.handle_application_tx( block_number, - Transaction::RemoveParticipant { participant: target, signed: new_signed(key2) }, + Transaction::RemoveParticipant { + participant: target, + signed: random_signed_for_key(key2), + }, ); } assert!( @@ -367,10 +383,10 @@ mod handle_application_tx { fn dkg_participation() { let mut db = MemDb::new(); - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (signer_key, _) = keys_addrs[0]; let mut txn = db.txn(); @@ -381,20 +397,21 @@ mod handle_application_tx { OsRng.next_u64(), Transaction::DkgParticipation { participation: vec![1, 2, 3], - signed: new_signed(signer_key), + signed: random_signed_for_key(signer_key), }, ); } + // TODO: Check the received message is the expected one assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } #[test] fn dkg_confirmation_preprocess() { - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); let mut db = MemDb::new(); @@ -409,12 +426,12 @@ mod handle_application_tx { Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess: random_bytes(&mut OsRng), - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); if i != 2 { // Below threshold: no DkgConfirmationMessages sent - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + assert!(DkgConfirmationMessages::try_recv(scan_block.tributary_txn, set).is_none()); } } } @@ -425,10 +442,10 @@ mod handle_application_tx { #[test] fn dkg_confirmation_share() { - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (key0, addr0) = keys_addrs[0]; let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); @@ -443,7 +460,7 @@ mod handle_application_tx { Transaction::DkgConfirmationShare { attempt: 0, share: random_bytes(&mut OsRng), - signed: new_signed(key0), + signed: random_signed_for_key(key0), }, ); @@ -467,11 +484,11 @@ mod handle_application_tx { Transaction::DkgConfirmationPreprocess { attempt: 0, preprocess: random_bytes(&mut OsRng), - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); if i != 2 { - assert!(DkgConfirmationMessages::try_recv(&mut txn, set).is_none()); + assert!(DkgConfirmationMessages::try_recv(scan_block.tributary_txn, set).is_none()); } } } @@ -490,14 +507,14 @@ mod handle_application_tx { Transaction::DkgConfirmationShare { attempt: 0, share: random_bytes(&mut OsRng), - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); if i != 2 { - assert!( - DkgConfirmationMessages::try_recv(&mut txn, set).is_none(), - "less than threshold should not produce DkgConfirmationMessages" - ); + assert!( + DkgConfirmationMessages::try_recv(scan_block.tributary_txn, set).is_none(), + "less than threshold should not produce DkgConfirmationMessages" + ); } } } @@ -510,10 +527,10 @@ mod handle_application_tx { #[test] fn cosign() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let block_hash = random_block_hash(&mut OsRng); let global_session = random_bytes(&mut OsRng); @@ -570,10 +587,10 @@ mod handle_application_tx { #[test] fn cosigned() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; // Marks block as cosigned { @@ -660,10 +677,10 @@ mod handle_application_tx { #[test] fn substrate_block() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let mut db = MemDb::new(); let block_hash = random_block_hash(&mut OsRng); @@ -683,17 +700,17 @@ mod handle_application_tx { } for plan in &plans { - let topic = expected_initially_recognized_sign_topic(VariantSignId::Transaction(*plan)); + let topic = initial_sign_topic(VariantSignId::Transaction(*plan)); assert!(RecognizedTopics::recognized(&txn, set, topic)); } } #[test] fn batch() { - let set = random_validator_set(&mut OsRng); let (_, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let mut db = MemDb::new(); let batch_hash = random_bytes(&mut OsRng); @@ -704,7 +721,7 @@ mod handle_application_tx { scan_block.handle_application_tx(OsRng.next_u64(), Transaction::Batch { hash: batch_hash }); } - let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(batch_hash)); + let topic = initial_sign_topic(VariantSignId::Batch(batch_hash)); assert!(RecognizedTopics::recognized(&txn, set, topic)); } @@ -719,11 +736,10 @@ mod handle_application_tx { wrong_len = if wrong_len == 1 { 2 } else { wrong_len - 1 }; } - let set = random_validator_set(&mut OsRng); - let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let mut db = MemDb::new(); let mut txn = db.txn(); @@ -737,7 +753,7 @@ mod handle_application_tx { OsRng.next_u64(), Transaction::SlashReport { slash_points: vec![0; usize::from(wrong_len)], - signed: new_signed(signer_key), + signed: random_signed_for_key(signer_key), }, ); } @@ -757,10 +773,10 @@ mod handle_application_tx { let num_validators = OsRng.gen_range(4u16 .. 10); let num_reports = usize::from(Topic::SlashReport.required_participation(num_validators)); - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(num_validators); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let mut report = vec![0u32; usize::from(num_validators)]; report[0] = u32::MAX; @@ -776,12 +792,16 @@ mod handle_application_tx { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( OsRng.next_u64(), - Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + Transaction::SlashReport { + slash_points: report.clone(), + signed: random_signed_for_key(key), + }, ); } } // A ProcessorMessage should be produced containing a Fatal slash + // TODO: Check the exact message received let msg = ProcessorMessages::try_recv(&mut txn, set); assert!(msg.is_some(), "expected ProcessorMessage for fatal slash report"); } @@ -793,11 +813,8 @@ mod handle_application_tx { /// produce when `DataSet::Participating` is reached, mirroring the production logic. /// /// Returns `None` if `f == 0` (the slash report would be empty and nothing is sent). - fn expected_slash_report(num_validators: u16, reports: &[Vec]) -> Option> { + fn expected_slash_report(num_validators: u16, reports: &[Vec]) -> Vec { let f = (num_validators - 1) / 3; - if f == 0 { - return None; - } // Compute the median for each validator position across all reporters let mut medians = Vec::with_capacity(usize::from(num_validators)); @@ -814,11 +831,7 @@ mod handle_application_tx { sorted.sort_unstable(); let amortization = sorted[usize::from(num_validators - f - 1)]; - let amortized: Vec = medians.iter().map(|p| p.saturating_sub(amortization)).collect(); - - // Filter to non-zero entries only - let result: Vec = amortized.into_iter().filter(|&p| p > 0).collect(); - Some(result) + medians.iter().map(|p| p.saturating_sub(amortization)).collect::>() } /// Generate `count` slash report vectors, each of length `num_validators`. @@ -837,11 +850,10 @@ mod handle_application_tx { let n = OsRng.gen_range(2u16 ..= 5) * 2; let num_reports = Topic::SlashReport.required_participation(n); - let set = random_validator_set(&mut OsRng); - let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(n); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let reports = random_slash_reports(&mut OsRng, n, num_reports); let expected = expected_slash_report(n, &reports); @@ -856,27 +868,34 @@ mod handle_application_tx { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( OsRng.next_u64(), - Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + Transaction::SlashReport { + slash_points: report.clone(), + signed: random_signed_for_key(key), + }, ); } } - match expected { - Some(result) if !result.is_empty() => { - assert!( - ProcessorMessages::try_recv(&mut txn, set).is_some(), - "expected ProcessorMessage for non-empty slash report {result:?}", - ); - } - _ => { - assert!( - ProcessorMessages::try_recv(&mut txn, set).is_some(), - "expected ProcessorMessage even for empty slash report", - ); - } - } + assert_eq!( + ProcessorMessages::try_recv(&mut txn, set), + Some(messages::CoordinatorMessage::from( + messages::coordinator::CoordinatorMessage::SignSlashReport { + session: set.session, + slash_report: expected + .into_iter() + .map(|points| if points == u32::MAX { + Slash::Fatal + } else { + Slash::Points(points) + }) + .collect::>() + .try_into() + .unwrap(), + } + )) + ); - let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + let sign_topic = initial_sign_topic(VariantSignId::SlashReport); assert!( RecognizedTopics::recognized(&txn, set, sign_topic), "SlashReport sign topic should be recognized", @@ -889,14 +908,12 @@ mod handle_application_tx { for _ in 0 .. 200 { // random odd: 5, 7, 9, or 11 let n = OsRng.gen_range(2u16 ..= 5) * 2 + 1; - let f = usize::from((n - 1) / 3); let num_reports = Topic::SlashReport.required_participation(n); - let set = random_validator_set(&mut OsRng); - let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(n); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let reports = random_slash_reports(&mut OsRng, n, num_reports); let expected = expected_slash_report(n, &reports); @@ -911,22 +928,33 @@ mod handle_application_tx { let (key, _) = keys_addrs[i]; scan_block.handle_application_tx( OsRng.next_u64(), - Transaction::SlashReport { slash_points: report.clone(), signed: new_signed(key) }, + Transaction::SlashReport { + slash_points: report.clone(), + signed: random_signed_for_key(key), + }, ); } } - match expected { - Some(result) => { - assert!(result.len() <= f, "slash report len {} should be <= f={f}", result.len()); - } - None => { - unreachable!(); - } - } - - assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); - let sign_topic = expected_initially_recognized_sign_topic(VariantSignId::SlashReport); + assert_eq!( + ProcessorMessages::try_recv(&mut txn, set), + Some(messages::CoordinatorMessage::from( + messages::coordinator::CoordinatorMessage::SignSlashReport { + session: set.session, + slash_report: expected + .into_iter() + .map(|points| if points == u32::MAX { + Slash::Fatal + } else { + Slash::Points(points) + }) + .collect::>() + .try_into() + .unwrap(), + } + )) + ); + let sign_topic = initial_sign_topic(VariantSignId::SlashReport); assert!(RecognizedTopics::recognized(&txn, set, sign_topic)); } } @@ -935,15 +963,15 @@ mod handle_application_tx { #[test] fn sign() { - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (key0, addr0) = keys_addrs[0]; let (key1, key2) = (keys_addrs[1].0, keys_addrs[2].0); let sign_id = VariantSignId::Transaction(random_bytes(&mut OsRng)); - let topic = expected_initially_recognized_sign_topic(sign_id); + let topic = initial_sign_topic(sign_id); // Wrong data length: signer has weight 1 but submits 2 entries -> fatal slash { @@ -959,7 +987,7 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Preprocess, data: vec![vec![1], vec![2]], - signed: new_signed(key0), + signed: random_signed_for_key(key0), }, ); @@ -983,12 +1011,13 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Preprocess, data: vec![vec![1, 2, 3]], - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); } } + // TODO: Check the exact message received assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); } } @@ -998,14 +1027,14 @@ mod handle_application_tx { /// and stores preceding data), then accumulating shares to threshold. #[test] fn sign_share_sends_shares_message() { - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_test_validators_and_weights_with_keys(); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let (key0, key1, key2) = (keys_addrs[0].0, keys_addrs[1].0, keys_addrs[2].0); let sign_id = VariantSignId::Transaction(random_bytes(&mut OsRng)); - let preprocess_topic = expected_initially_recognized_sign_topic(sign_id); + let preprocess_topic = initial_sign_topic(sign_id); let share_topic = Topic::Sign { id: sign_id, attempt: 0, round: SigningProtocolRound::Share }; let mut db = MemDb::new(); @@ -1027,13 +1056,14 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Preprocess, data: vec![vec![1, 2, 3]], - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); } } // Drain the Preprocesses message from step 1 + // TODO: Check the exact message received assert!(ProcessorMessages::try_recv(&mut txn, set).is_some()); // Share topic should now be recognized @@ -1051,7 +1081,7 @@ mod handle_application_tx { attempt: 0, round: SigningProtocolRound::Share, data: vec![vec![4, 5, 6]], - signed: new_signed(key), + signed: random_signed_for_key(key), }, ); } @@ -1059,6 +1089,7 @@ mod handle_application_tx { // The Shares message should have been sent let msg = ProcessorMessages::try_recv(&mut txn, set); + // TODO: Check the exact message received assert!(msg.is_some(), "expected Shares processor message"); // No validators should be slashed @@ -1070,12 +1101,12 @@ mod handle_application_tx { #[test] fn handle_block() { - let set = random_validator_set(&mut OsRng); let (keys_addrs, validator_data, validators, weights, total_weight) = setup_n_validators_with_keys(3); let set_info = new_test_set_info(&validator_data); + let set = set_info.set; let addr0 = validator_data[0].0; - let signed = new_signed(keys_addrs[0].0); + let signed = random_signed_for_key(keys_addrs[0].0); // Empty block only calls start of block { @@ -1099,7 +1130,7 @@ fn handle_block() { // Each application transaction type passes through handle_block. // Signed transactions use a real validator key so participant_indexes lookups succeed. // Cosign and SubstrateBlock need external state populated before they can run. - for tx in all_signed_transactions_and_attempts(&signed) { + for tx in all_signed_transactions_and_attempts(signed) { let mut db = MemDb::new(); let mut txn = db.txn(); @@ -1142,14 +1173,15 @@ fn handle_block() { let plans = vec![random_bytes(&mut OsRng)]; SubstrateBlockPlans::set(&mut txn, set, *hash, &plans); } + // `Cosigned`, `Batch` are provided but do not require pre-existing state + Transaction::Cosigned { .. } | Transaction::Batch { .. } => {} + // These aren't provided transactions Transaction::RemoveParticipant { .. } | Transaction::DkgParticipation { .. } | Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } | - Transaction::Cosigned { .. } | - Transaction::Batch { .. } | Transaction::Sign { .. } | - Transaction::SlashReport { .. } => {} + Transaction::SlashReport { .. } => unreachable!(), } let block_txs = vec![TributaryTransaction::Application(tx)]; @@ -1256,7 +1288,7 @@ fn handle_block() { ); } for hash in &batch_hashes { - let topic = expected_initially_recognized_sign_topic(VariantSignId::Batch(*hash)); + let topic = initial_sign_topic(VariantSignId::Batch(*hash)); assert!( RecognizedTopics::recognized(&txn, set, topic), "Batch should be recognized regardless of other txs in the block", diff --git a/coordinator/tributary/src/tests/scan_tributary.rs b/coordinator/tributary/src/tests/scan_tributary.rs index 34607cbff..57eed11d1 100644 --- a/coordinator/tributary/src/tests/scan_tributary.rs +++ b/coordinator/tributary/src/tests/scan_tributary.rs @@ -24,7 +24,7 @@ async fn make_tributary( let mut validators = vec![]; for weight in weights.iter().copied() { let this_key = random_key(&mut OsRng); - let pub_key = get_key_point(&this_key); + let pub_key = ::generator() * *this_key; key = Some(this_key); validator_keys.push((pub_key, u64::from(weight))); let addr = SeraiAddress(pub_key.to_bytes()); diff --git a/coordinator/tributary/src/tests/transaction.rs b/coordinator/tributary/src/tests/transaction.rs index 0fd5643de..b87378e1d 100644 --- a/coordinator/tributary/src/tests/transaction.rs +++ b/coordinator/tributary/src/tests/transaction.rs @@ -1,21 +1,24 @@ use core::ops::Deref as _; -use std::io::{self, Cursor, Read, Write}; +use std::{ + io::{self, Cursor, Read, Write}, + collections::HashSet, +}; +use rand_core::{RngCore as _, OsRng}; use blake2::{digest::typenum::U32, Digest as _, Blake2b}; -use borsh::{BorshDeserialize as _, BorshSerialize as _}; -use rand::{RngCore as _, rngs::OsRng}; - use ciphersuite::{ - group::{Group as _, GroupEncoding as _, ff::PrimeField as _}, + group::{ff::PrimeField as _, Group as _, GroupEncoding as _}, *, }; use dalek_ff_group::Ristretto; +use borsh::{BorshDeserialize as _, BorshSerialize as _}; + use messages::sign::VariantSignId; use serai_primitives::{test_helpers::*, validator_sets::KeyShares}; use tributary_sdk::{ ReadWrite, - transaction::{Transaction as _, TransactionError, TransactionKind}, + transaction::{TransactionKind, Transaction as _, TransactionError}, }; use super::*; @@ -26,12 +29,8 @@ fn all_signing_protocol_rounds() -> Vec { #[test] fn signing_protocol_round_nonce() { - for round in all_signing_protocol_rounds() { - let expected_nonce = match round { - SigningProtocolRound::Preprocess => 0, - SigningProtocolRound::Share => 1, - }; - assert_eq!(round.nonce(), expected_nonce, "Wrong nonce for {round:?}"); + for (i, round) in all_signing_protocol_rounds().into_iter().enumerate() { + assert_eq!(round.nonce(), u32::try_from(i).unwrap(), "Wrong nonce for {round:?}"); } } @@ -69,10 +68,9 @@ mod signed { ); let deserialized: Signed = borsh::from_slice(&serialized).unwrap(); - let mut cursor = Cursor::new(&serialized); assert_eq!( deserialized, - Signed::deserialize_reader(&mut cursor).unwrap(), + Signed::deserialize_reader(&mut serialized.as_slice()).unwrap(), "borsh::from_slice and Signed::deserialize_reader should produce identical results" ); @@ -122,8 +120,7 @@ mod signed { { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let truncated = &serialized[.. 5]; - let mut cursor = Cursor::new(truncated); - let result = Signed::deserialize_reader(&mut cursor); + let result = Signed::deserialize_reader(&mut &*truncated); assert!(result.is_err(), "truncated data should fail to deserialize"); } @@ -131,8 +128,7 @@ mod signed { { let serialized = borsh::to_vec(&random_signed(&mut OsRng)).unwrap(); let signer_only = &serialized[.. 32]; - let mut cursor = Cursor::new(signer_only); - let result = Signed::deserialize_reader(&mut cursor); + let result = Signed::deserialize_reader(&mut &*signer_only); assert!(result.is_err(), "signer-only data without signature should fail to deserialize"); } } @@ -326,7 +322,8 @@ mod transaction { out } - for mut tx in all_signed_transactions_and_attempts(&random_signed(&mut OsRng)) { + let mut orders = HashSet::new(); + for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); let (expected_order, expected_nonce) = match &tx { @@ -336,7 +333,11 @@ mod transaction { (order, 0) } Transaction::DkgParticipation { .. } => (borsh_label(b"DkgParticipation"), 0), - Transaction::DkgConfirmationPreprocess { attempt, .. } | + Transaction::DkgConfirmationPreprocess { attempt, .. } => { + let mut order = borsh_label(b"DkgConfirmation"); + order.extend(&attempt.to_le_bytes()); + (order, 0) + } Transaction::DkgConfirmationShare { attempt, .. } => { let mut order = borsh_label(b"DkgConfirmation"); order.extend(&attempt.to_le_bytes()); @@ -374,9 +375,10 @@ mod transaction { Transaction::Cosigned { .. } | Transaction::SubstrateBlock { .. } | Transaction::Batch { .. }) => { - panic!("all_signed_transactions_and_attempts returned non-signed tx: {other:?}") + unreachable!("all_signed_transactions_and_attempts returned non-signed tx: {other:?}") } }; + orders.insert((expected_order.clone(), expected_nonce)); match tx.kind() { TransactionKind::Signed(order, signed) => { @@ -392,10 +394,12 @@ mod transaction { } } } + assert_eq!(orders.len(), 11); } #[test] fn provided_transactions_kind() { + let mut orders = HashSet::new(); for tx in all_provided_transactions() { let expected_order = match &tx { Transaction::Cosign { .. } => "Cosign", @@ -411,6 +415,7 @@ mod transaction { panic!("all_provided_transactions returned non-provided tx: {other:?}") } }; + orders.insert(expected_order); match tx.kind() { TransactionKind::Provided(actual_order) => { @@ -421,6 +426,7 @@ mod transaction { } } } + assert_eq!(orders.len(), 4); } } @@ -475,6 +481,7 @@ mod transaction { tx2.hash(), "Hashes should be equal despite different nonces and signatures" ); + assert_ne!(ReadWrite::serialize(&tx1), ReadWrite::serialize(&tx2)); } } } @@ -587,17 +594,35 @@ mod transaction { let genesis = random_bytes(&mut OsRng); // Sets correct signer and produces verifiable signature - for mut tx in all_signed_transactions_and_attempts(&random_signed(&mut OsRng)) { + for mut tx in all_signed_transactions_and_attempts(random_signed(&mut OsRng)) { tx.sign(&mut OsRng, genesis, &key); + let TransactionKind::Signed(order, tributary_signed) = tx.kind() else { + panic!("non-signed TX from `all_signed_transactions_and_attempts`") + }; let sig_hash = tx.sig_hash(genesis); - - if let TransactionKind::Signed(_, tributary_signed) = tx.kind() { - assert_eq!(tributary_signed.signer, expected_signer, "Wrong signer for {tx:?}"); - assert!( - tributary_signed.signature.verify(tributary_signed.signer, sig_hash), - "Signature verification failed for {tx:?}" - ); - } + assert_eq!( + sig_hash, + ::F::from_bytes_mod_order_wide( + &blake2::Blake2b512::digest( + [ + b"Tributary Signed Transaction", + genesis.as_slice(), + &tx.hash(), + order.as_slice(), + tributary_signed.signature.R.to_bytes().as_slice(), + ] + .concat(), + ) + .into(), + ) + ); + + assert_eq!(tributary_signed.signer, expected_signer, "Wrong signer for {tx:?}"); + assert_ne!(tributary_signed.signature.R, ::G::identity()); + assert!( + tributary_signed.signature.verify(tributary_signed.signer, sig_hash), + "Signature verification failed for {tx:?}" + ); } // Wrong genesis fails verification diff --git a/coordinator/tributary/src/tests/tributary.rs b/coordinator/tributary/src/tests/tributary.rs index e4f441d55..9349b6ff2 100644 --- a/coordinator/tributary/src/tests/tributary.rs +++ b/coordinator/tributary/src/tests/tributary.rs @@ -1,27 +1,10 @@ use serai_db::{Db as _, DbTxn as _, MemDb}; -use crate::*; +use crate::{Transaction, SlashPoints, TributaryDb, slash_report_transaction}; use super::*; -/// Helper to extract slash_points from a SlashReport transaction. -fn unwrap_slash_report(tx: Transaction) -> (Vec, Signed) { - match tx { - Transaction::SlashReport { slash_points, signed } => (slash_points, signed), - other @ (Transaction::RemoveParticipant { .. } | - Transaction::DkgParticipation { .. } | - Transaction::DkgConfirmationPreprocess { .. } | - Transaction::DkgConfirmationShare { .. } | - Transaction::Cosign { .. } | - Transaction::Cosigned { .. } | - Transaction::SubstrateBlock { .. } | - Transaction::Batch { .. } | - Transaction::Sign { .. }) => panic!("expected SlashReport, got {other:?}"), - } -} - +// TODO: Test the resulting slash report the Tributary would yield in response to consensus on this #[test] fn slash_report() { - let set = random_validator_set(&mut OsRng); - // No slash points set: all zeros { let db = MemDb::new(); @@ -32,9 +15,10 @@ fn slash_report() { ]; let set_info = new_test_set_info(&validators); - let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); - assert_eq!(points, vec![0, 0, 0]); - assert_eq!(signed, Signed::default()); + assert_eq!( + slash_report_transaction(&db, &set_info), + Transaction::SlashReport { slash_points: vec![0, 0, 0], signed: Signed::default() } + ); } // Respects validator order @@ -47,6 +31,7 @@ fn slash_report() { random_serai_address(&mut OsRng), ); let set_info = new_test_set_info(&[(v1, 1), (v2, 1), (v3, 1), (v4, 1)]); + let set = set_info.set; let (slash1, slash2, slash3, slash4) = (OsRng.next_u32(), OsRng.next_u32(), OsRng.next_u32(), OsRng.next_u32()); @@ -62,9 +47,13 @@ fn slash_report() { txn.commit(); } - let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); - assert_eq!(points, vec![slash1, slash2, slash3, slash4]); - assert_eq!(signed, Signed::default()); + assert_eq!( + slash_report_transaction(&db, &set_info), + Transaction::SlashReport { + slash_points: vec![slash1, slash2, slash3, slash4], + signed: Signed::default() + } + ); } // Fatal slash yields u32::MAX @@ -72,6 +61,7 @@ fn slash_report() { let mut db = MemDb::new(); let (v1, v2) = (random_serai_address(&mut OsRng), random_serai_address(&mut OsRng)); let set_info = new_test_set_info(&[(v1, 1), (v2, 1)]); + let set = set_info.set; { let mut txn = db.txn(); @@ -79,8 +69,9 @@ fn slash_report() { txn.commit(); } - let (points, signed) = unwrap_slash_report(slash_report_transaction(&db, &set_info)); - assert_eq!(points, vec![u32::MAX, 0]); - assert_eq!(signed, Signed::default()); + assert_eq!( + slash_report_transaction(&db, &set_info), + Transaction::SlashReport { slash_points: vec![u32::MAX, 0], signed: Signed::default() } + ); } } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 53cc229b4..d7c68c8fc 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -94,7 +94,11 @@ impl Default for Signed { } } -/// The Tributary transaction definition used by Serai +/// The Tributary transaction definition used by Serai. +/// +/// Two transactions will be considered equal if equal on every level. This means transactions +/// which aren't equal may share a hash, due to the hash not binding to the signature, yet the +/// equality binding to the signature. #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum Transaction { /// A vote to remove a participant for invalid behavior