Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions base_layer/wallet/src/storage/database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,11 @@ pub trait WalletBackend: Send + Sync + Clone {
exclude_recovered: bool,
) -> Result<(), WalletStorageError>;

/// Apply a sparse storage schedule to scanned blocks, keeping all recent blocks
/// and progressively sparser checkpoints for older blocks. Blocks containing
/// recovered outputs are always preserved.
fn apply_sparse_scanned_blocks_schedule(&self, tip_height: u64) -> Result<(), WalletStorageError>;

/// Change the passphrase used to encrypt the database
fn change_passphrase(&self, existing: &SafePassword, new: &SafePassword) -> Result<(), WalletStorageError>;

Expand Down Expand Up @@ -337,6 +342,11 @@ where T: WalletBackend + 'static
Ok(())
}

/// Prune the scanned-blocks cache down to the sparse retention schedule,
/// delegating to the backend implementation.
pub fn apply_sparse_scanned_blocks_schedule(&self, tip_height: u64) -> Result<(), WalletStorageError> {
    // The backend already returns the unit Result we need, so pass it
    // straight through instead of `?`-then-`Ok(())`.
    self.db.apply_sparse_scanned_blocks_schedule(tip_height)
}

pub fn get_all_burn_proofs(&self) -> Result<Vec<DbBurnProof>, WalletStorageError> {
self.db.fetch_burn_proofs()
}
Expand Down
172 changes: 172 additions & 0 deletions base_layer/wallet/src/storage/sqlite_db/scanned_blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,52 @@ impl ScannedBlockSql {
query.execute(conn)?;
Ok(())
}

/// Apply a sparse storage schedule to scanned blocks, keeping:
/// - All blocks within `tip - 720` to `tip`
/// - Every 100th block from `tip - 10,000` to `tip - 720`
/// - Every 1,000th block from `tip - 100,000` to `tip - 10,000`
/// - Every 5,000th block from genesis to `tip - 100,000`
///
/// Blocks that contain recovered outputs are never deleted, matching the
/// `exclude_recovered` behaviour of the `clear_before_height` path this
/// schedule replaces at the scanner call site.
///
/// Implemented as a single `DELETE ... WHERE` so the scanned_blocks table
/// is not materialised in memory even when the wallet has scanned tens of
/// thousands of headers.
pub fn apply_sparse_schedule(tip_height: u64, conn: &mut SqliteConnection) -> Result<(), WalletStorageError> {
    // Pre-compute the retention-window boundaries once and clamp to i64.
    // `saturating_sub` keeps the query well-defined at low tip heights
    // (e.g. immediately after a fresh scan or in unit tests).
    let tip = i64::try_from(tip_height).unwrap_or(i64::MAX);
    let recent_boundary = tip.saturating_sub(720);
    let medium_boundary = tip.saturating_sub(10_000);
    let sparse_boundary = tip.saturating_sub(100_000);

    // Boundary semantics match the original Rust logic:
    //   keep             depth ∈ [0, 720]          → height >= recent_boundary
    //   band 1 (%100)    depth ∈ (720, 10_000]     → height ∈ [medium_boundary, recent_boundary)
    //   band 2 (%1_000)  depth ∈ (10_000, 100_000] → height ∈ [sparse_boundary, medium_boundary)
    //   band 3 (%5_000)  depth ∈ (100_000, ∞)      → height < sparse_boundary
    //
    // The outer `height < recent_boundary` guard excludes the last 720
    // blocks from any deletion. The `num_outputs` guard preserves every
    // block from which outputs were recovered — without it, a recovered
    // block that happens to fall off its band's modulus grid would be
    // pruned, contradicting the WalletBackend trait contract.
    // NOTE(review): assumes a block with no recovered outputs has
    // `num_outputs` NULL or 0 — confirm against the diesel schema and the
    // `clear_before_height(exclude_recovered)` filter.
    diesel::sql_query(
        "DELETE FROM scanned_blocks \
         WHERE height < ? \
         AND (num_outputs IS NULL OR num_outputs = 0) \
         AND ( \
             (height >= ? AND (height % 100) != 0) OR \
             (height < ? AND height >= ? AND (height % 1000) != 0) OR \
             (height < ? AND (height % 5000) != 0) \
         )",
    )
    .bind::<diesel::sql_types::BigInt, _>(recent_boundary)
    .bind::<diesel::sql_types::BigInt, _>(medium_boundary)
    .bind::<diesel::sql_types::BigInt, _>(medium_boundary)
    .bind::<diesel::sql_types::BigInt, _>(sparse_boundary)
    .bind::<diesel::sql_types::BigInt, _>(sparse_boundary)
    .execute(conn)?;

    Ok(())
}
}

impl From<ScannedBlock> for ScannedBlockSql {
Expand All @@ -132,3 +178,129 @@ impl TryFrom<ScannedBlockSql> for ScannedBlock {
})
}
}

#[cfg(test)]
mod test {
use diesel::SqliteConnection;
use tari_common_sqlite::sqlite_connection_pool::PooledDbConnection;

use super::*;
use crate::storage::sqlite_utilities::run_migration_and_create_sqlite_memory_connection;

/// Keep the `WalletDbConnection` alive for the lifetime of the test so the
/// `:memory:` SQLite pool doesn't drop between calls.
struct TestDb {
    _conn: crate::storage::sqlite_utilities::WalletDbConnection,
}

impl TestDb {
    /// Build an in-memory wallet DB with migrations applied and return the
    /// keep-alive guard together with one pooled connection for the test body.
    fn new() -> (Self, diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<SqliteConnection>>) {
        let wallet_db = run_migration_and_create_sqlite_memory_connection().unwrap();
        let pooled = wallet_db.get_pooled_connection().unwrap();
        let guard = Self { _conn: wallet_db };
        (guard, pooled)
    }
}

/// Insert one scanned-block row per height; the hash bytes are derived from
/// the height's low byte so every row is distinct and deterministic.
fn insert_heights(conn: &mut SqliteConnection, heights: &[i64]) {
    heights.iter().for_each(|&height| {
        let hash = vec![(height & 0xff) as u8; 32];
        ScannedBlockSql::new(hash, height).commit(conn).unwrap();
    });
}

/// Fetch all surviving scanned-block heights in ascending order.
fn remaining_heights(conn: &mut SqliteConnection) -> Vec<i64> {
    let mut heights: Vec<i64> = ScannedBlockSql::index(conn)
        .unwrap()
        .into_iter()
        .map(|block| block.height)
        .collect();
    heights.sort_unstable();
    heights
}

#[test]
fn sparse_schedule_keeps_last_720_blocks_intact() {
    let (_db, mut conn) = TestDb::new();
    let all: Vec<i64> = (0..=720).collect();
    insert_heights(&mut conn, &all);

    ScannedBlockSql::apply_sparse_schedule(720, &mut conn).unwrap();

    // With tip = 720 every stored height has depth <= 720, so the whole
    // range sits inside the retention window and nothing may be pruned.
    assert_eq!(remaining_heights(&mut conn), all);
}

#[test]
fn sparse_schedule_prunes_medium_band_to_every_100th_block() {
    let (_db, mut conn) = TestDb::new();
    // Tip = 5000: heights >= 4280 have depth <= 720 and survive verbatim,
    // while everything below the window is thinned to the 100-block grid.
    insert_heights(&mut conn, &(0..=5000).collect::<Vec<i64>>());

    ScannedBlockSql::apply_sparse_schedule(5000, &mut conn).unwrap();

    let remaining = remaining_heights(&mut conn);
    assert!(
        (4281..=5000).all(|h| remaining.contains(&h)),
        "an in-window height is missing"
    );
    assert!(
        (0..=4200).step_by(100).all(|h| remaining.contains(&h)),
        "a modulus-100 height is missing"
    );
    // Off-grid heights below the retention window must be gone.
    assert!(!remaining.contains(&123));
    assert!(!remaining.contains(&4279));
}

#[test]
fn sparse_schedule_prunes_deep_bands_to_every_1000th_and_5000th_blocks() {
    let (_db, mut conn) = TestDb::new();
    // 5000-grid heights up to 1_000_000, plus hand-picked probes for each band.
    let mut heights: Vec<i64> = (0..=200).map(|i| i * 5_000).collect();
    heights.extend([999_999, 999_500, 999_000, 900_500, 900_000, 100_123, 1_001, 1_000]);
    heights.sort_unstable();
    heights.dedup();
    insert_heights(&mut conn, &heights);

    ScannedBlockSql::apply_sparse_schedule(1_000_000u64, &mut conn).unwrap();

    let remaining = remaining_heights(&mut conn);
    // Survivors: in-window (999_999, 999_500); band-1 on the 100-grid
    // (999_000); band-2 on the 1000-grid (900_000); deepest band on the
    // 5000-grid (0 — on every modulus — and 5_000).
    for kept in [999_999, 999_500, 999_000, 900_000, 0, 5_000] {
        assert!(remaining.contains(&kept), "expected {kept} to survive");
    }
    // Pruned: band-2 off the 1000-grid (900_500); deepest band off the
    // 5000-grid (100_123, 1_001, 1_000).
    for gone in [900_500, 100_123, 1_001, 1_000] {
        assert!(!remaining.contains(&gone), "expected {gone} to be pruned");
    }
}

#[test]
fn sparse_schedule_is_idempotent() {
    let (_db, mut conn) = TestDb::new();
    insert_heights(&mut conn, &(0..=2_000).collect::<Vec<i64>>());

    // Running the schedule twice at the same tip must not prune anything
    // further: every survivor of the first pass already sits on its band's
    // modulus grid (or inside the retention window).
    let mut snapshots = Vec::new();
    for _ in 0..2 {
        ScannedBlockSql::apply_sparse_schedule(2_000, &mut conn).unwrap();
        snapshots.push(remaining_heights(&mut conn));
    }
    assert_eq!(snapshots[0], snapshots[1]);
}

#[test]
fn sparse_schedule_handles_small_tip_without_saturating_underflow() {
    let (_db, mut conn) = TestDb::new();
    let all: Vec<i64> = (0..=100).collect();
    insert_heights(&mut conn, &all);

    // tip < 720 saturates every boundary at zero, so the whole chain is
    // inside the retention window and must survive untouched.
    ScannedBlockSql::apply_sparse_schedule(100, &mut conn).unwrap();

    assert_eq!(remaining_heights(&mut conn), all);
}
}
5 changes: 5 additions & 0 deletions base_layer/wallet/src/storage/sqlite_db/wallet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -551,6 +551,11 @@ impl WalletBackend for WalletSqliteDatabase {
ScannedBlockSql::clear_before_height(height, exclude_recovered, &mut conn)
}

/// Delegates to [`ScannedBlockSql::apply_sparse_schedule`] using one pooled
/// connection from this database.
fn apply_sparse_scanned_blocks_schedule(&self, tip_height: u64) -> Result<(), WalletStorageError> {
    ScannedBlockSql::apply_sparse_schedule(tip_height, &mut self.database_connection.get_pooled_connection()?)
}

fn change_passphrase(&self, existing: &SafePassword, new: &SafePassword) -> Result<(), WalletStorageError> {
let mut conn = self.database_connection.get_pooled_connection()?;

Expand Down
2 changes: 1 addition & 1 deletion base_layer/wallet/src/utxo_scanner_service/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ use crate::{

pub const LOG_TARGET: &str = "wallet::utxo_scanning";

// Cache 1 days worth of headers.
// Keep all blocks within the most recent window (approximately 1 day of blocks).
pub const SCANNED_BLOCK_CACHE_SIZE: u64 = 720;

pub struct UtxoScannerService<
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ use crate::{
utxo_scanner_service::{
RECOVERY_KEY,
handle::UtxoScannerEvent,
service::{SCANNED_BLOCK_CACHE_SIZE, ScannedBlock, UtxoScannerResources},
service::{ScannedBlock, UtxoScannerResources},
uxto_scanner_service_builder::UtxoScannerMode,
},
};
Expand Down Expand Up @@ -576,10 +576,9 @@ where
}
// We need to update the last one
if let Some(scanned_block) = prev_scanned_block.clone() {
self.resources.db.clear_scanned_blocks_before_height(
scanned_block.height.saturating_sub(SCANNED_BLOCK_CACHE_SIZE),
true,
)?;
self.resources
.db
.apply_sparse_scanned_blocks_schedule(scanned_block.height)?;
if last_saved_hash != Some(scanned_block.header_hash) {
self.resources.db.save_scanned_block(scanned_block)?;
}
Expand Down