Skip to content

Commit

Permalink
Move BankHashStats from accounts-db to bank (solana-labs#3821)
Browse files Browse the repository at this point in the history
* remove lock on BankHashStat in accounts-db store

* move bank_hash_stats into bank

* set bank_hash_stat in serde

* add test

* pr

* pr

* pr: remove test

* pr: remove comments

* pr: move BankHashStats

* pr: return BankHashStat

* undo fmt

* reset bank hash stat

* pr: move bank_hash_stat into reconstructed_db_info

* update abi digest

* pr: update stats in two other places

* refactor

* pr: add bank_hash_stats to BankFields

* pr: remove set_bank_hash_stats

---------

Co-authored-by: HaoranYi <[email protected]>
  • Loading branch information
HaoranYi and HaoranYi authored Dec 4, 2024
1 parent eb40511 commit c13ca2d
Show file tree
Hide file tree
Showing 10 changed files with 144 additions and 178 deletions.
8 changes: 0 additions & 8 deletions accounts-db/src/accounts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -884,14 +884,6 @@ mod tests {
assert_eq!(loaded, vec![]);
}

#[test]
fn test_accounts_empty_bank_hash_stats() {
    // A freshly created AccountsDb is seeded with a default stats entry for
    // slot 0 only; no other slot has stats yet.
    let db = Arc::new(AccountsDb::new_single_for_tests());
    let accounts = Accounts::new(db);
    assert!(accounts.accounts_db.get_bank_hash_stats(0).is_some());
    assert!(accounts.accounts_db.get_bank_hash_stats(1).is_none());
}

#[test]
fn test_lock_accounts_with_duplicates() {
let accounts_db = AccountsDb::new_single_for_tests();
Expand Down
64 changes: 2 additions & 62 deletions accounts-db/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ use {
},
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_db::stats::{
AccountsStats, BankHashStats, CleanAccountsStats, FlushStats, PurgeStats,
ShrinkAncientStats, ShrinkStats, ShrinkStatsSub, StoreAccountsTiming,
AccountsStats, CleanAccountsStats, FlushStats, PurgeStats, ShrinkAncientStats,
ShrinkStats, ShrinkStatsSub, StoreAccountsTiming,
},
accounts_file::{
AccountsFile, AccountsFileError, AccountsFileProvider, MatchAccountOwnerError,
Expand Down Expand Up @@ -1533,7 +1533,6 @@ pub struct AccountsDb {

pub thread_pool_hash: ThreadPool,

bank_hash_stats: Mutex<HashMap<Slot, BankHashStats>>,
accounts_delta_hashes: Mutex<HashMap<Slot, AccountsDeltaHash>>,
accounts_hashes: Mutex<HashMap<Slot, (AccountsHash, /*capitalization*/ u64)>>,
incremental_accounts_hashes:
Expand Down Expand Up @@ -1979,8 +1978,6 @@ impl AccountsDb {
Self::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
));

let bank_hash_stats = Mutex::new(HashMap::from([(0, BankHashStats::default())]));

// Increase the stack for foreground threads
// rayon needs a lot of stack
const ACCOUNTS_STACK_SIZE: usize = 8 * 1024 * 1024;
Expand Down Expand Up @@ -2049,7 +2046,6 @@ impl AccountsDb {
.into(),
verify_experimental_accumulator_hash: accounts_db_config
.verify_experimental_accumulator_hash,
bank_hash_stats,
thread_pool,
thread_pool_clean,
thread_pool_hash,
Expand Down Expand Up @@ -4556,12 +4552,10 @@ impl AccountsDb {
dropped_roots: impl Iterator<Item = Slot>,
) {
let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap();
let mut bank_hash_stats = self.bank_hash_stats.lock().unwrap();

dropped_roots.for_each(|slot| {
self.accounts_index.clean_dead_slot(slot);
accounts_delta_hashes.remove(&slot);
bank_hash_stats.remove(&slot);
// the storage has been removed from this slot and recycled or dropped
assert!(self.storage.remove(&slot, false).is_none());
debug_assert!(
Expand Down Expand Up @@ -4973,21 +4967,6 @@ impl AccountsDb {
}
}

/// Insert a default bank hash stats for `slot`
///
/// This fn is called when creating a new bank from parent. If an entry for
/// `slot` already exists (i.e. multiple forks share `slot` as a child), the
/// existing stats are left untouched and an error is logged.
pub fn insert_default_bank_hash_stats(&self, slot: Slot, parent_slot: Slot) {
    let mut bank_hash_stats = self.bank_hash_stats.lock().unwrap();
    // `contains_key` is the idiomatic existence check (clippy flags
    // `get(..).is_some()`), and avoids constructing an unused reference.
    if bank_hash_stats.contains_key(&slot) {
        error!(
            "set_hash: already exists; multiple forks with shared slot {slot} as child \
            (parent: {parent_slot})!?"
        );
        return;
    }
    bank_hash_stats.insert(slot, BankHashStats::default());
}

pub fn load(
&self,
ancestors: &Ancestors,
Expand Down Expand Up @@ -7654,30 +7633,6 @@ impl AccountsDb {
.cloned()
}

/// Record `stats` for `slot` while reconstructing AccountsDb from a snapshot.
///
/// Only called during snapshot load, when the AccountsDb is brand new and its
/// bank hash stats map holds nothing but the seeded default entry at slot 0.
/// That default entry is evicted first, then `stats` is stored at `slot`.
///
/// Returns the stats previously recorded at `slot`, if any.
pub fn update_bank_hash_stats_from_snapshot(
    &mut self,
    slot: Slot,
    stats: BankHashStats,
) -> Option<BankHashStats> {
    let mut stats_map = self.bank_hash_stats.lock().unwrap();
    // Evict the default entry that every new AccountsDb is seeded with.
    stats_map.remove(&0);
    stats_map.insert(slot, stats)
}

/// Returns a clone of the bank hash stats recorded for `slot`, if any.
pub fn get_bank_hash_stats(&self, slot: Slot) -> Option<BankHashStats> {
    let stats_map = self.bank_hash_stats.lock().unwrap();
    stats_map.get(&slot).cloned()
}

fn update_index<'a>(
&self,
infos: Vec<AccountInfo>,
Expand Down Expand Up @@ -7899,13 +7854,10 @@ impl AccountsDb {
);

let mut accounts_delta_hashes = self.accounts_delta_hashes.lock().unwrap();
let mut bank_hash_stats = self.bank_hash_stats.lock().unwrap();
for slot in dead_slots_iter {
accounts_delta_hashes.remove(slot);
bank_hash_stats.remove(slot);
}
drop(accounts_delta_hashes);
drop(bank_hash_stats);

measure.stop();
inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize);
Expand Down Expand Up @@ -8135,29 +8087,17 @@ impl AccountsDb {
return;
}

let mut stats = BankHashStats::default();
let mut total_data = 0;
(0..accounts.len()).for_each(|index| {
accounts.account(index, |account| {
total_data += account.data().len();
stats.update(&account);
})
});

self.stats
.store_total_data
.fetch_add(total_data as u64, Ordering::Relaxed);

{
// we need to drop the bank_hash_stats lock to prevent deadlocks
self.bank_hash_stats
.lock()
.unwrap()
.entry(accounts.target_slot())
.or_default()
.accumulate(&stats);
}

// we use default hashes for now since the same account may be stored to the cache multiple times
self.store_accounts_unfrozen(
accounts,
Expand Down
42 changes: 2 additions & 40 deletions accounts-db/src/accounts_db/stats.rs
Original file line number Diff line number Diff line change
@@ -1,56 +1,18 @@
use {
crate::{
accounts_index::{AccountsIndexRootsStats, ZeroLamport},
accounts_index::AccountsIndexRootsStats,
append_vec::{
APPEND_VEC_MMAPPED_FILES_DIRTY, APPEND_VEC_MMAPPED_FILES_OPEN,
APPEND_VEC_OPEN_AS_FILE_IO,
},
},
serde::{Deserialize, Serialize},
solana_sdk::{account::ReadableAccount, timing::AtomicInterval},
solana_sdk::timing::AtomicInterval,
std::{
num::Saturating,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
},
};

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BankHashStats {
    pub num_updated_accounts: u64,
    pub num_removed_accounts: u64,
    pub num_lamports_stored: u64,
    pub total_data_len: u64,
    pub num_executable_accounts: u64,
}

impl BankHashStats {
    /// Fold a single stored account into these stats.
    pub fn update<T: ReadableAccount + ZeroLamport>(&mut self, account: &T) {
        // A zero-lamport store counts as a removal; anything else is an update.
        let count = if account.is_zero_lamport() {
            &mut self.num_removed_accounts
        } else {
            &mut self.num_updated_accounts
        };
        *count += 1;
        if account.executable() {
            self.num_executable_accounts += 1;
        }
        // Sizes and lamports use wrapping adds so the stats never panic on
        // overflow in debug builds.
        self.total_data_len = self.total_data_len.wrapping_add(account.data().len() as u64);
        self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports());
    }

    /// Merge `other` into `self`, field by field.
    pub fn accumulate(&mut self, other: &BankHashStats) {
        self.num_updated_accounts += other.num_updated_accounts;
        self.num_removed_accounts += other.num_removed_accounts;
        self.num_executable_accounts += other.num_executable_accounts;
        self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
        self.num_lamports_stored = self
            .num_lamports_stored
            .wrapping_add(other.num_lamports_stored);
    }
}

#[derive(Debug, Default)]
pub struct AccountsStats {
pub delta_hash_scan_time_total_us: AtomicU64,
Expand Down
28 changes: 0 additions & 28 deletions accounts-db/src/accounts_db/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1089,14 +1089,12 @@ fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) {
} else {
db.store_for_tests(unrooted_slot, &[(&key, &account0)]);
}
assert!(db.get_bank_hash_stats(unrooted_slot).is_some());
assert!(db.accounts_index.contains(&key));
db.assert_load_account(unrooted_slot, key, 1);

// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
assert!(db.get_bank_hash_stats(unrooted_slot).is_none());
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none());
assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_none());
assert!(!db.accounts_index.contains(&key));
Expand Down Expand Up @@ -2394,32 +2392,6 @@ fn test_hash_stored_account() {
);
}

#[test]
fn test_bank_hash_stats() {
    solana_logger::setup();
    let db = AccountsDb::new_single_for_tests();

    let pubkey = Pubkey::default();
    let data_len = 5;
    let slot: Slot = 0;
    let ancestors = vec![(slot, 0)].into_iter().collect();

    // First store: one live account holding 1 lamport.
    let account = AccountSharedData::new(1, data_len, &pubkey);
    db.store_for_tests(slot, &[(&pubkey, &account)]);

    // Second store: the same account drained to 0 lamports (a removal) and
    // flagged executable.
    let mut account = db.load_without_fixed_root(&ancestors, &pubkey).unwrap().0;
    account.checked_sub_lamports(1).unwrap();
    account.set_executable(true);
    db.store_for_tests(slot, &[(&pubkey, &account)]);
    db.add_root(slot);

    // One update, one removal, 1 lamport total, data stored twice.
    let stats = db.get_bank_hash_stats(slot).unwrap();
    assert_eq!(stats.num_updated_accounts, 1);
    assert_eq!(stats.num_removed_accounts, 1);
    assert_eq!(stats.num_lamports_stored, 1);
    assert_eq!(stats.total_data_len, 2 * data_len as u64);
    assert_eq!(stats.num_executable_accounts, 1);
}

// something we can get a ref to
lazy_static! {
pub static ref EPOCH_SCHEDULE: EpochSchedule = EpochSchedule::default();
Expand Down
Loading

0 comments on commit c13ca2d

Please sign in to comment.