global: address -> addr rename

This commit is contained in:
nym21
2026-03-17 11:01:21 +01:00
parent 5609e6c010
commit f62943199c
141 changed files with 3788 additions and 3754 deletions

View File

@@ -9,7 +9,7 @@
//! | `reactivated` | Addresses that were empty and now have funds |
//! | `both` | Addresses that both sent AND received same block |
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU32, Version};
@@ -41,9 +41,9 @@ impl BlockActivityCounts {
/// Per-address-type activity counts - aggregated during block processing.
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddressTypeToActivityCounts(pub ByAddressType<BlockActivityCounts>);
pub struct AddrTypeToActivityCounts(pub ByAddrType<BlockActivityCounts>);
impl AddressTypeToActivityCounts {
impl AddrTypeToActivityCounts {
/// Reset all per-type counts.
pub(crate) fn reset(&mut self) {
self.0.values_mut().for_each(|v| v.reset());
@@ -163,16 +163,16 @@ impl ActivityCountVecs {
/// Per-address-type activity count vecs.
#[derive(Deref, DerefMut, Traversable)]
pub struct AddressTypeToActivityCountVecs<M: StorageMode = Rw>(ByAddressType<ActivityCountVecs<M>>);
pub struct AddrTypeToActivityCountVecs<M: StorageMode = Rw>(ByAddrType<ActivityCountVecs<M>>);
impl From<ByAddressType<ActivityCountVecs>> for AddressTypeToActivityCountVecs {
impl From<ByAddrType<ActivityCountVecs>> for AddrTypeToActivityCountVecs {
#[inline]
fn from(value: ByAddressType<ActivityCountVecs>) -> Self {
fn from(value: ByAddrType<ActivityCountVecs>) -> Self {
Self(value)
}
}
impl AddressTypeToActivityCountVecs {
impl AddrTypeToActivityCountVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
@@ -181,7 +181,7 @@ impl AddressTypeToActivityCountVecs {
cached_starts: &CachedWindowStarts,
) -> Result<Self> {
Ok(Self::from(
ByAddressType::<ActivityCountVecs>::new_with_name(|type_name| {
ByAddrType::<ActivityCountVecs>::new_with_name(|type_name| {
ActivityCountVecs::forced_import(
db,
&format!("{type_name}_{name}"),
@@ -233,7 +233,7 @@ impl AddressTypeToActivityCountVecs {
}
#[inline(always)]
pub(crate) fn push_height(&mut self, counts: &AddressTypeToActivityCounts) {
pub(crate) fn push_height(&mut self, counts: &AddrTypeToActivityCounts) {
for (vecs, c) in self.0.values_mut().zip(counts.0.values()) {
vecs.push_height(c);
}
@@ -242,13 +242,13 @@ impl AddressTypeToActivityCountVecs {
/// Storage for activity metrics (global + per type).
#[derive(Traversable)]
pub struct AddressActivityVecs<M: StorageMode = Rw> {
pub struct AddrActivityVecs<M: StorageMode = Rw> {
pub all: ActivityCountVecs<M>,
#[traversable(flatten)]
pub by_address_type: AddressTypeToActivityCountVecs<M>,
pub by_addr_type: AddrTypeToActivityCountVecs<M>,
}
impl AddressActivityVecs {
impl AddrActivityVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
@@ -258,7 +258,7 @@ impl AddressActivityVecs {
) -> Result<Self> {
Ok(Self {
all: ActivityCountVecs::forced_import(db, name, version, indexes, cached_starts)?,
by_address_type: AddressTypeToActivityCountVecs::forced_import(
by_addr_type: AddrTypeToActivityCountVecs::forced_import(
db, name, version, indexes, cached_starts,
)?,
})
@@ -267,7 +267,7 @@ impl AddressActivityVecs {
pub(crate) fn min_stateful_len(&self) -> usize {
self.all
.min_stateful_len()
.min(self.by_address_type.min_stateful_len())
.min(self.by_addr_type.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
@@ -275,12 +275,12 @@ impl AddressActivityVecs {
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.all
.par_iter_height_mut()
.chain(self.by_address_type.par_iter_height_mut())
.chain(self.by_addr_type.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.all.reset_height()?;
self.by_address_type.reset_height()?;
self.by_addr_type.reset_height()?;
Ok(())
}
@@ -290,14 +290,14 @@ impl AddressActivityVecs {
exit: &Exit,
) -> Result<()> {
self.all.compute_rest(max_from, exit)?;
self.by_address_type.compute_rest(max_from, exit)?;
self.by_addr_type.compute_rest(max_from, exit)?;
Ok(())
}
#[inline(always)]
pub(crate) fn push_height(&mut self, counts: &AddressTypeToActivityCounts) {
pub(crate) fn push_height(&mut self, counts: &AddrTypeToActivityCounts) {
let totals = counts.totals();
self.all.push_height(&totals);
self.by_address_type.push_height(counts);
self.by_addr_type.push_height(counts);
}
}

View File

@@ -1,4 +1,4 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Indexes, StoredU64, Version};
@@ -12,11 +12,11 @@ use vecdb::{
use crate::{indexes, internal::PerBlock};
#[derive(Deref, DerefMut, Traversable)]
pub struct AddressCountVecs<M: StorageMode = Rw>(
pub struct AddrCountVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub PerBlock<StoredU64, M>,
);
impl AddressCountVecs {
impl AddrCountVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
@@ -31,20 +31,20 @@ impl AddressCountVecs {
/// Address count per address type (runtime state).
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddressTypeToAddressCount(ByAddressType<u64>);
pub struct AddrTypeToAddrCount(ByAddrType<u64>);
impl AddressTypeToAddressCount {
impl AddrTypeToAddrCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
}
impl From<(&AddressTypeToAddressCountVecs, Height)> for AddressTypeToAddressCount {
impl From<(&AddrTypeToAddrCountVecs, Height)> for AddrTypeToAddrCount {
#[inline]
fn from((groups, starting_height): (&AddressTypeToAddressCountVecs, Height)) -> Self {
fn from((groups, starting_height): (&AddrTypeToAddrCountVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
Self(ByAddressType {
Self(ByAddrType {
p2pk65: groups
.p2pk65
.height
@@ -102,25 +102,25 @@ impl From<(&AddressTypeToAddressCountVecs, Height)> for AddressTypeToAddressCoun
/// Address count per address type, with height + derived indexes.
#[derive(Deref, DerefMut, Traversable)]
pub struct AddressTypeToAddressCountVecs<M: StorageMode = Rw>(ByAddressType<AddressCountVecs<M>>);
pub struct AddrTypeToAddrCountVecs<M: StorageMode = Rw>(ByAddrType<AddrCountVecs<M>>);
impl From<ByAddressType<AddressCountVecs>> for AddressTypeToAddressCountVecs {
impl From<ByAddrType<AddrCountVecs>> for AddrTypeToAddrCountVecs {
#[inline]
fn from(value: ByAddressType<AddressCountVecs>) -> Self {
fn from(value: ByAddrType<AddrCountVecs>) -> Self {
Self(value)
}
}
impl AddressTypeToAddressCountVecs {
impl AddrTypeToAddrCountVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self::from(ByAddressType::<AddressCountVecs>::new_with_name(
Ok(Self::from(ByAddrType::<AddrCountVecs>::new_with_name(
|type_name| {
AddressCountVecs::forced_import(db, &format!("{type_name}_{name}"), version, indexes)
AddrCountVecs::forced_import(db, &format!("{type_name}_{name}"), version, indexes)
},
)?))
}
@@ -138,8 +138,8 @@ impl AddressTypeToAddressCountVecs {
}
#[inline(always)]
pub(crate) fn push_height(&mut self, address_counts: &AddressTypeToAddressCount) {
for (vecs, &count) in self.0.values_mut().zip(address_counts.values()) {
pub(crate) fn push_height(&mut self, addr_counts: &AddrTypeToAddrCount) {
for (vecs, &count) in self.0.values_mut().zip(addr_counts.values()) {
vecs.height.push(count.into());
}
}
@@ -157,13 +157,13 @@ impl AddressTypeToAddressCountVecs {
}
#[derive(Traversable)]
pub struct AddressCountsVecs<M: StorageMode = Rw> {
pub all: AddressCountVecs<M>,
pub struct AddrCountsVecs<M: StorageMode = Rw> {
pub all: AddrCountVecs<M>,
#[traversable(flatten)]
pub by_address_type: AddressTypeToAddressCountVecs<M>,
pub by_addr_type: AddrTypeToAddrCountVecs<M>,
}
impl AddressCountsVecs {
impl AddrCountsVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
@@ -171,32 +171,32 @@ impl AddressCountsVecs {
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self {
all: AddressCountVecs::forced_import(db, name, version, indexes)?,
by_address_type: AddressTypeToAddressCountVecs::forced_import(db, name, version, indexes)?,
all: AddrCountVecs::forced_import(db, name, version, indexes)?,
by_addr_type: AddrTypeToAddrCountVecs::forced_import(db, name, version, indexes)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.all.height.len().min(self.by_address_type.min_stateful_len())
self.all.height.len().min(self.by_addr_type.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
rayon::iter::once(&mut self.all.height as &mut dyn AnyStoredVec)
.chain(self.by_address_type.par_iter_height_mut())
.chain(self.by_addr_type.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.all.height.reset()?;
self.by_address_type.reset_height()?;
self.by_addr_type.reset_height()?;
Ok(())
}
#[inline(always)]
pub(crate) fn push_height(&mut self, total: u64, address_counts: &AddressTypeToAddressCount) {
pub(crate) fn push_height(&mut self, total: u64, addr_counts: &AddrTypeToAddrCount) {
self.all.height.push(total.into());
self.by_address_type.push_height(address_counts);
self.by_addr_type.push_height(addr_counts);
}
pub(crate) fn compute_rest(
@@ -204,7 +204,7 @@ impl AddressCountsVecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let sources = self.by_address_type.by_height();
let sources = self.by_addr_type.by_height();
self.all
.height
.compute_sum_of_others(starting_indexes.height, &sources, exit)?;

View File

@@ -1,19 +1,19 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{
EmptyAddressData, EmptyAddressIndex, FundedAddressData, FundedAddressIndex, Height,
EmptyAddrData, EmptyAddrIndex, FundedAddrData, FundedAddrIndex, Height,
};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, BytesVec, Rw, Stamp, StorageMode, WritableVec};
/// Storage for both funded and empty address data.
#[derive(Traversable)]
pub struct AddressesDataVecs<M: StorageMode = Rw> {
pub funded: M::Stored<BytesVec<FundedAddressIndex, FundedAddressData>>,
pub empty: M::Stored<BytesVec<EmptyAddressIndex, EmptyAddressData>>,
pub struct AddrsDataVecs<M: StorageMode = Rw> {
pub funded: M::Stored<BytesVec<FundedAddrIndex, FundedAddrData>>,
pub empty: M::Stored<BytesVec<EmptyAddrIndex, EmptyAddrData>>,
}
impl AddressesDataVecs {
impl AddrsDataVecs {
/// Get minimum stamped height across funded and empty data.
pub(crate) fn min_stamped_len(&self) -> Height {
Height::from(self.funded.stamp())

View File

@@ -1,4 +1,4 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPointsSigned32, StoredI64, StoredU64, Version};
@@ -7,7 +7,7 @@ use crate::{
internal::{CachedWindowStarts, LazyRollingDeltasFromHeight},
};
use super::AddressCountsVecs;
use super::AddrCountsVecs;
type AddrDelta = LazyRollingDeltasFromHeight<StoredU64, StoredI64, BasisPointsSigned32>;
@@ -15,29 +15,29 @@ type AddrDelta = LazyRollingDeltasFromHeight<StoredU64, StoredI64, BasisPointsSi
pub struct DeltaVecs {
pub all: AddrDelta,
#[traversable(flatten)]
pub by_address_type: ByAddressType<AddrDelta>,
pub by_addr_type: ByAddrType<AddrDelta>,
}
impl DeltaVecs {
pub(crate) fn new(
version: Version,
address_count: &AddressCountsVecs,
addr_count: &AddrCountsVecs,
cached_starts: &CachedWindowStarts,
indexes: &indexes::Vecs,
) -> Self {
let version = version + Version::TWO;
let all = LazyRollingDeltasFromHeight::new(
"address_count",
"addr_count",
version,
&address_count.all.0.height,
&addr_count.all.0.height,
cached_starts,
indexes,
);
let by_address_type = address_count.by_address_type.map_with_name(|name, addr| {
let by_addr_type = addr_count.by_addr_type.map_with_name(|name, addr| {
LazyRollingDeltasFromHeight::new(
&format!("{name}_address_count"),
&format!("{name}_addr_count"),
version,
&addr.0.height,
cached_starts,
@@ -47,7 +47,7 @@ impl DeltaVecs {
Self {
all,
by_address_type,
by_addr_type,
}
}
}

View File

@@ -1,11 +1,11 @@
use std::thread;
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::{Error, Result};
use brk_traversable::Traversable;
use brk_types::{
AnyAddressIndex, Height, OutputType, P2AAddressIndex, P2PK33AddressIndex, P2PK65AddressIndex,
P2PKHAddressIndex, P2SHAddressIndex, P2TRAddressIndex, P2WPKHAddressIndex, P2WSHAddressIndex,
AnyAddrIndex, Height, OutputType, P2AAddrIndex, P2PK33AddrIndex, P2PK65AddrIndex,
P2PKHAddrIndex, P2SHAddrIndex, P2TRAddrIndex, P2WPKHAddrIndex, P2WSHAddrIndex,
TypeIndex, Version,
};
use rayon::prelude::*;
@@ -15,24 +15,24 @@ use vecdb::{
Rw, Stamp, StorageMode, WritableVec,
};
use super::super::AddressTypeToTypeIndexMap;
use super::super::AddrTypeToTypeIndexMap;
const SAVED_STAMPED_CHANGES: u16 = 10;
/// Macro to define AnyAddressIndexesVecs and its methods.
macro_rules! define_any_address_indexes_vecs {
/// Macro to define AnyAddrIndexesVecs and its methods.
macro_rules! define_any_addr_indexes_vecs {
($(($field:ident, $variant:ident, $index:ty)),* $(,)?) => {
#[derive(Traversable)]
pub struct AnyAddressIndexesVecs<M: StorageMode = Rw> {
$(pub $field: M::Stored<BytesVec<$index, AnyAddressIndex>>,)*
pub struct AnyAddrIndexesVecs<M: StorageMode = Rw> {
$(pub $field: M::Stored<BytesVec<$index, AnyAddrIndex>>,)*
}
impl AnyAddressIndexesVecs {
impl AnyAddrIndexesVecs {
/// Import from database.
pub(crate) fn forced_import(db: &Database, version: Version) -> Result<Self> {
Ok(Self {
$($field: BytesVec::forced_import_with(
ImportOptions::new(db, "any_address_index", version)
ImportOptions::new(db, "any_addr_index", version)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,)*
})
@@ -59,10 +59,10 @@ macro_rules! define_any_address_indexes_vecs {
/// Get address index for a given type and type_index.
/// Uses get_any_or_read_at to check updated layer (needed after rollback).
pub(crate) fn get(&self, address_type: OutputType, type_index: TypeIndex, reader: &Reader) -> Result<AnyAddressIndex> {
match address_type {
pub(crate) fn get(&self, addr_type: OutputType, type_index: TypeIndex, reader: &Reader) -> Result<AnyAddrIndex> {
match addr_type {
$(OutputType::$variant => Ok(self.$field.get_any_or_read_at(type_index.into(), reader)?.unwrap()),)*
_ => unreachable!("Invalid address type: {:?}", address_type),
_ => unreachable!("Invalid addr type: {:?}", addr_type),
}
}
@@ -72,14 +72,14 @@ macro_rules! define_any_address_indexes_vecs {
}
}
impl<M: StorageMode> AnyAddressIndexesVecs<M> {
impl<M: StorageMode> AnyAddrIndexesVecs<M> {
/// Get address index with single read (no caching).
pub fn get_once(&self, address_type: OutputType, type_index: TypeIndex) -> Result<AnyAddressIndex> {
match address_type {
pub fn get_once(&self, addr_type: OutputType, type_index: TypeIndex) -> Result<AnyAddrIndex> {
match addr_type {
$(OutputType::$variant => self.$field
.collect_one(<$index>::from(usize::from(type_index)))
.ok_or_else(|| Error::UnsupportedType(address_type.to_string())),)*
_ => Err(Error::UnsupportedType(address_type.to_string())),
.ok_or_else(|| Error::UnsupportedType(addr_type.to_string())),)*
_ => Err(Error::UnsupportedType(addr_type.to_string())),
}
}
}
@@ -87,28 +87,28 @@ macro_rules! define_any_address_indexes_vecs {
}
// Generate the struct and methods
define_any_address_indexes_vecs!(
(p2a, P2A, P2AAddressIndex),
(p2pk33, P2PK33, P2PK33AddressIndex),
(p2pk65, P2PK65, P2PK65AddressIndex),
(p2pkh, P2PKH, P2PKHAddressIndex),
(p2sh, P2SH, P2SHAddressIndex),
(p2tr, P2TR, P2TRAddressIndex),
(p2wpkh, P2WPKH, P2WPKHAddressIndex),
(p2wsh, P2WSH, P2WSHAddressIndex),
define_any_addr_indexes_vecs!(
(p2a, P2A, P2AAddrIndex),
(p2pk33, P2PK33, P2PK33AddrIndex),
(p2pk65, P2PK65, P2PK65AddrIndex),
(p2pkh, P2PKH, P2PKHAddrIndex),
(p2sh, P2SH, P2SHAddrIndex),
(p2tr, P2TR, P2TRAddrIndex),
(p2wpkh, P2WPKH, P2WPKHAddrIndex),
(p2wsh, P2WSH, P2WSHAddrIndex),
);
impl AnyAddressIndexesVecs {
impl AnyAddrIndexesVecs {
/// Process index updates in parallel by address type.
/// Accepts two maps (e.g. from empty and funded processing) and merges per-thread.
/// Updates existing entries and pushes new ones (sorted).
/// Returns (update_count, push_count).
pub(crate) fn par_batch_update(
&mut self,
updates1: AddressTypeToTypeIndexMap<AnyAddressIndex>,
updates2: AddressTypeToTypeIndexMap<AnyAddressIndex>,
updates1: AddrTypeToTypeIndexMap<AnyAddrIndex>,
updates2: AddrTypeToTypeIndexMap<AnyAddrIndex>,
) -> Result<(usize, usize)> {
let ByAddressType {
let ByAddrType {
p2a: u1_p2a,
p2pk33: u1_p2pk33,
p2pk65: u1_p2pk65,
@@ -119,7 +119,7 @@ impl AnyAddressIndexesVecs {
p2wsh: u1_p2wsh,
} = updates1.into_inner();
let ByAddressType {
let ByAddrType {
p2a: u2_p2a,
p2pk33: u2_p2pk33,
p2pk65: u2_p2pk65,
@@ -169,9 +169,9 @@ impl AnyAddressIndexesVecs {
/// Process updates for a single address type's BytesVec, merging two maps.
fn process_single_type_merged<I: vecdb::VecIndex>(
vec: &mut BytesVec<I, AnyAddressIndex>,
map1: FxHashMap<TypeIndex, AnyAddressIndex>,
map2: FxHashMap<TypeIndex, AnyAddressIndex>,
vec: &mut BytesVec<I, AnyAddrIndex>,
map1: FxHashMap<TypeIndex, AnyAddrIndex>,
map2: FxHashMap<TypeIndex, AnyAddrIndex>,
) -> Result<(usize, usize)> {
let current_len = vec.len();
let mut pushes = Vec::with_capacity(map1.len() + map2.len());

View File

@@ -0,0 +1,17 @@
mod activity;
mod addr_count;
mod data;
mod delta;
mod indexes;
mod new_addr_count;
mod total_addr_count;
mod type_map;
pub use activity::{AddrActivityVecs, AddrTypeToActivityCounts};
pub use addr_count::{AddrCountsVecs, AddrTypeToAddrCount};
pub use data::AddrsDataVecs;
pub use delta::DeltaVecs;
pub use indexes::AnyAddrIndexesVecs;
pub use new_addr_count::NewAddrCountVecs;
pub use total_addr_count::TotalAddrCountVecs;
pub use type_map::{AddrTypeToTypeIndexMap, AddrTypeToVec, HeightToAddrTypeToVec};

View File

@@ -1,4 +1,4 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU64, Version};
@@ -9,17 +9,17 @@ use crate::{
internal::{CachedWindowStarts, PerBlockCumulativeWithSums},
};
use super::TotalAddressCountVecs;
use super::TotalAddrCountVecs;
/// New address count per block (global + per-type)
#[derive(Traversable)]
pub struct NewAddressCountVecs<M: StorageMode = Rw> {
pub struct NewAddrCountVecs<M: StorageMode = Rw> {
pub all: PerBlockCumulativeWithSums<StoredU64, StoredU64, M>,
#[traversable(flatten)]
pub by_address_type: ByAddressType<PerBlockCumulativeWithSums<StoredU64, StoredU64, M>>,
pub by_addr_type: ByAddrType<PerBlockCumulativeWithSums<StoredU64, StoredU64, M>>,
}
impl NewAddressCountVecs {
impl NewAddrCountVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
@@ -28,16 +28,16 @@ impl NewAddressCountVecs {
) -> Result<Self> {
let all = PerBlockCumulativeWithSums::forced_import(
db,
"new_address_count",
"new_addr_count",
version,
indexes,
cached_starts,
)?;
let by_address_type = ByAddressType::new_with_name(|name| {
let by_addr_type = ByAddrType::new_with_name(|name| {
PerBlockCumulativeWithSums::forced_import(
db,
&format!("{name}_new_address_count"),
&format!("{name}_new_addr_count"),
version,
indexes,
cached_starts,
@@ -46,24 +46,24 @@ impl NewAddressCountVecs {
Ok(Self {
all,
by_address_type,
by_addr_type,
})
}
pub(crate) fn compute(
&mut self,
max_from: Height,
total_address_count: &TotalAddressCountVecs,
total_addr_count: &TotalAddrCountVecs,
exit: &Exit,
) -> Result<()> {
self.all.compute(max_from, exit, |height_vec| {
Ok(height_vec.compute_change(max_from, &total_address_count.all.height, 1, exit)?)
Ok(height_vec.compute_change(max_from, &total_addr_count.all.height, 1, exit)?)
})?;
for ((_, new), (_, total)) in self
.by_address_type
.by_addr_type
.iter_mut()
.zip(total_address_count.by_address_type.iter())
.zip(total_addr_count.by_addr_type.iter())
{
new.compute(max_from, exit, |height_vec| {
Ok(height_vec.compute_change(max_from, &total.height, 1, exit)?)

View File

@@ -1,4 +1,4 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU64, Version};
@@ -6,29 +6,29 @@ use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{indexes, internal::PerBlock};
use super::AddressCountsVecs;
use super::AddrCountsVecs;
/// Total address count (global + per-type) with all derived indexes
#[derive(Traversable)]
pub struct TotalAddressCountVecs<M: StorageMode = Rw> {
pub struct TotalAddrCountVecs<M: StorageMode = Rw> {
pub all: PerBlock<StoredU64, M>,
#[traversable(flatten)]
pub by_address_type: ByAddressType<PerBlock<StoredU64, M>>,
pub by_addr_type: ByAddrType<PerBlock<StoredU64, M>>,
}
impl TotalAddressCountVecs {
impl TotalAddrCountVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let all = PerBlock::forced_import(db, "total_address_count", version, indexes)?;
let all = PerBlock::forced_import(db, "total_addr_count", version, indexes)?;
let by_address_type: ByAddressType<PerBlock<StoredU64>> =
ByAddressType::new_with_name(|name| {
let by_addr_type: ByAddrType<PerBlock<StoredU64>> =
ByAddrType::new_with_name(|name| {
PerBlock::forced_import(
db,
&format!("{name}_total_address_count"),
&format!("{name}_total_addr_count"),
version,
indexes,
)
@@ -36,30 +36,30 @@ impl TotalAddressCountVecs {
Ok(Self {
all,
by_address_type,
by_addr_type,
})
}
/// Eagerly compute total = address_count + empty_address_count.
/// Eagerly compute total = addr_count + empty_addr_count.
pub(crate) fn compute(
&mut self,
max_from: Height,
address_count: &AddressCountsVecs,
empty_address_count: &AddressCountsVecs,
addr_count: &AddrCountsVecs,
empty_addr_count: &AddrCountsVecs,
exit: &Exit,
) -> Result<()> {
self.all.height.compute_add(
max_from,
&address_count.all.height,
&empty_address_count.all.height,
&addr_count.all.height,
&empty_addr_count.all.height,
exit,
)?;
for ((_, total), ((_, addr), (_, empty))) in self.by_address_type.iter_mut().zip(
address_count
.by_address_type
for ((_, total), ((_, addr), (_, empty))) in self.by_addr_type.iter_mut().zip(
addr_count
.by_addr_type
.iter()
.zip(empty_address_count.by_address_type.iter()),
.zip(empty_addr_count.by_addr_type.iter()),
) {
total
.height

View File

@@ -2,13 +2,13 @@ use brk_types::Height;
use derive_more::{Deref, DerefMut};
use rustc_hash::FxHashMap;
use super::vec::AddressTypeToVec;
use super::vec::AddrTypeToVec;
/// Hashmap from Height to AddressTypeToVec.
/// Hashmap from Height to AddrTypeToVec.
#[derive(Debug, Default, Deref, DerefMut)]
pub struct HeightToAddressTypeToVec<T>(FxHashMap<Height, AddressTypeToVec<T>>);
pub struct HeightToAddrTypeToVec<T>(FxHashMap<Height, AddrTypeToVec<T>>);
impl<T> HeightToAddressTypeToVec<T> {
impl<T> HeightToAddrTypeToVec<T> {
/// Create with pre-allocated capacity for unique heights.
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self(FxHashMap::with_capacity_and_hasher(
@@ -18,9 +18,9 @@ impl<T> HeightToAddressTypeToVec<T> {
}
}
impl<T> HeightToAddressTypeToVec<T> {
/// Consume and iterate over (Height, AddressTypeToVec) pairs.
pub(crate) fn into_iter(self) -> impl Iterator<Item = (Height, AddressTypeToVec<T>)> {
impl<T> HeightToAddrTypeToVec<T> {
/// Consume and iterate over (Height, AddrTypeToVec) pairs.
pub(crate) fn into_iter(self) -> impl Iterator<Item = (Height, AddrTypeToVec<T>)> {
self.0.into_iter()
}
}

View File

@@ -1,6 +1,6 @@
use std::{collections::hash_map::Entry, mem};
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_types::{OutputType, TypeIndex};
use derive_more::{Deref, DerefMut};
use rustc_hash::FxHashMap;
@@ -8,11 +8,11 @@ use smallvec::{Array, SmallVec};
/// A hashmap for each address type, keyed by TypeIndex.
#[derive(Debug, Deref, DerefMut)]
pub struct AddressTypeToTypeIndexMap<T>(ByAddressType<FxHashMap<TypeIndex, T>>);
pub struct AddrTypeToTypeIndexMap<T>(ByAddrType<FxHashMap<TypeIndex, T>>);
impl<T> Default for AddressTypeToTypeIndexMap<T> {
impl<T> Default for AddrTypeToTypeIndexMap<T> {
fn default() -> Self {
Self(ByAddressType {
Self(ByAddrType {
p2a: FxHashMap::default(),
p2pk33: FxHashMap::default(),
p2pk65: FxHashMap::default(),
@@ -25,10 +25,10 @@ impl<T> Default for AddressTypeToTypeIndexMap<T> {
}
}
impl<T> AddressTypeToTypeIndexMap<T> {
impl<T> AddrTypeToTypeIndexMap<T> {
/// Create with pre-allocated capacity per address type.
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self(ByAddressType {
Self(ByAddrType {
p2a: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
p2pk33: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
p2pk65: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
@@ -62,11 +62,11 @@ impl<T> AddressTypeToTypeIndexMap<T> {
/// Insert a value for a specific address type and type_index.
pub(crate) fn insert_for_type(
&mut self,
address_type: OutputType,
addr_type: OutputType,
type_index: TypeIndex,
value: T,
) {
self.get_mut(address_type).unwrap().insert(type_index, value);
self.get_mut(addr_type).unwrap().insert(type_index, value);
}
/// Consume and iterate over entries by address type.
@@ -75,8 +75,8 @@ impl<T> AddressTypeToTypeIndexMap<T> {
self.0.into_iter()
}
/// Consume and return the inner ByAddressType.
pub(crate) fn into_inner(self) -> ByAddressType<FxHashMap<TypeIndex, T>> {
/// Consume and return the inner ByAddrType.
pub(crate) fn into_inner(self) -> ByAddrType<FxHashMap<TypeIndex, T>> {
self.0
}
@@ -88,14 +88,14 @@ impl<T> AddressTypeToTypeIndexMap<T> {
}
}
impl<T> AddressTypeToTypeIndexMap<SmallVec<T>>
impl<T> AddrTypeToTypeIndexMap<SmallVec<T>>
where
T: Array,
{
/// Merge two maps of SmallVec values, concatenating vectors.
pub(crate) fn merge_vec(mut self, other: Self) -> Self {
for (address_type, other_map) in other.0.into_iter() {
let self_map = self.0.get_mut_unwrap(address_type);
for (addr_type, other_map) in other.0.into_iter() {
let self_map = self.0.get_mut_unwrap(addr_type);
for (type_index, mut other_vec) in other_map {
match self_map.entry(type_index) {
Entry::Occupied(mut entry) => {

View File

@@ -1,13 +1,13 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use derive_more::{Deref, DerefMut};
/// A vector for each address type.
#[derive(Debug, Deref, DerefMut)]
pub struct AddressTypeToVec<T>(ByAddressType<Vec<T>>);
pub struct AddrTypeToVec<T>(ByAddrType<Vec<T>>);
impl<T> Default for AddressTypeToVec<T> {
impl<T> Default for AddrTypeToVec<T> {
fn default() -> Self {
Self(ByAddressType {
Self(ByAddrType {
p2a: vec![],
p2pk33: vec![],
p2pk65: vec![],
@@ -20,10 +20,10 @@ impl<T> Default for AddressTypeToVec<T> {
}
}
impl<T> AddressTypeToVec<T> {
impl<T> AddrTypeToVec<T> {
/// Create with pre-allocated capacity per address type.
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self(ByAddressType {
Self(ByAddrType {
p2a: Vec::with_capacity(capacity),
p2pk33: Vec::with_capacity(capacity),
p2pk65: Vec::with_capacity(capacity),
@@ -36,9 +36,9 @@ impl<T> AddressTypeToVec<T> {
}
}
impl<T> AddressTypeToVec<T> {
/// Unwrap the inner ByAddressType.
pub(crate) fn unwrap(self) -> ByAddressType<Vec<T>> {
impl<T> AddrTypeToVec<T> {
/// Unwrap the inner ByAddrType.
pub(crate) fn unwrap(self) -> ByAddrType<Vec<T>> {
self.0
}
}

View File

@@ -1,17 +0,0 @@
mod activity;
mod address_count;
mod data;
mod delta;
mod indexes;
mod new_address_count;
mod total_address_count;
mod type_map;
pub use activity::{AddressActivityVecs, AddressTypeToActivityCounts};
pub use address_count::{AddressCountsVecs, AddressTypeToAddressCount};
pub use data::AddressesDataVecs;
pub use delta::DeltaVecs;
pub use indexes::AnyAddressIndexesVecs;
pub use new_address_count::NewAddressCountVecs;
pub use total_address_count::TotalAddressCountVecs;
pub use type_map::{AddressTypeToTypeIndexMap, AddressTypeToVec, HeightToAddressTypeToVec};

View File

@@ -0,0 +1,138 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{
AnyAddrDataIndexEnum, EmptyAddrData, FundedAddrData, OutputType, TxIndex, TypeIndex,
};
use smallvec::SmallVec;
use crate::distribution::{
addr::{AddrTypeToTypeIndexMap, AddrsDataVecs, AnyAddrIndexesVecs},
compute::VecsReaders,
};
use super::super::cohort::{WithAddrDataSource, update_tx_counts};
use super::lookup::AddrLookup;
/// Cache for address data within a flush interval.
pub struct AddrCache {
/// Addrs with non-zero balance
funded: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
/// Addrs that became empty (zero balance)
empty: AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
}
impl Default for AddrCache {
fn default() -> Self {
Self::new()
}
}
impl AddrCache {
pub(crate) fn new() -> Self {
Self {
funded: AddrTypeToTypeIndexMap::default(),
empty: AddrTypeToTypeIndexMap::default(),
}
}
/// Check if address is in cache (either funded or empty).
#[inline]
pub(crate) fn contains(&self, addr_type: OutputType, type_index: TypeIndex) -> bool {
self.funded
.get(addr_type)
.is_some_and(|m| m.contains_key(&type_index))
|| self
.empty
.get(addr_type)
.is_some_and(|m| m.contains_key(&type_index))
}
/// Merge address data into funded cache.
#[inline]
pub(crate) fn merge_funded(
&mut self,
data: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
) {
self.funded.merge_mut(data);
}
/// Create an AddrLookup view into this cache.
#[inline]
pub(crate) fn as_lookup(&mut self) -> AddrLookup<'_> {
AddrLookup {
funded: &mut self.funded,
empty: &mut self.empty,
}
}
/// Update transaction counts for addresses.
pub(crate) fn update_tx_counts(
&mut self,
tx_index_vecs: AddrTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
) {
update_tx_counts(&mut self.funded, &mut self.empty, tx_index_vecs);
}
/// Take the cache contents for flushing, leaving empty caches.
pub(crate) fn take(
&mut self,
) -> (
AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
) {
(
std::mem::take(&mut self.empty),
std::mem::take(&mut self.funded),
)
}
}
/// Load address data from storage or create new.
///
/// Returns None if address is already in cache (funded or empty).
#[allow(clippy::too_many_arguments)]
pub(crate) fn load_uncached_addr_data(
addr_type: OutputType,
type_index: TypeIndex,
first_addr_indexes: &ByAddrType<TypeIndex>,
cache: &AddrCache,
vr: &VecsReaders,
any_addr_indexes: &AnyAddrIndexesVecs,
addrs_data: &AddrsDataVecs,
) -> Result<Option<WithAddrDataSource<FundedAddrData>>> {
// Check if this is a new address (type_index >= first for this height)
let first = *first_addr_indexes.get(addr_type).unwrap();
if first <= type_index {
return Ok(Some(WithAddrDataSource::New(
FundedAddrData::default(),
)));
}
// Skip if already in cache
if cache.contains(addr_type, type_index) {
return Ok(None);
}
// Read from storage
let reader = vr.addr_reader(addr_type);
let any_addr_index = any_addr_indexes.get(addr_type, type_index, reader)?;
Ok(Some(match any_addr_index.to_enum() {
AnyAddrDataIndexEnum::Funded(funded_index) => {
let reader = &vr.any_addr_index_to_any_addr_data.funded;
let funded_data = addrs_data
.funded
.get_any_or_read_at(funded_index.into(), reader)?
.unwrap();
WithAddrDataSource::FromFunded(funded_index, funded_data)
}
AnyAddrDataIndexEnum::Empty(empty_index) => {
let reader = &vr.any_addr_index_to_any_addr_data.empty;
let empty_data = addrs_data
.empty
.get_any_or_read_at(empty_index.into(), reader)?
.unwrap();
WithAddrDataSource::FromEmpty(empty_index, empty_data.into())
}
}))
}

View File

@@ -1,138 +0,0 @@
use brk_cohort::ByAddressType;
use brk_error::Result;
use brk_types::{
AnyAddressDataIndexEnum, EmptyAddressData, FundedAddressData, OutputType, TxIndex, TypeIndex,
};
use smallvec::SmallVec;
use crate::distribution::{
address::{AddressTypeToTypeIndexMap, AddressesDataVecs, AnyAddressIndexesVecs},
compute::VecsReaders,
};
use super::super::cohort::{WithAddressDataSource, update_tx_counts};
use super::lookup::AddressLookup;
/// Cache for address data within a flush interval.
///
/// Holds two disjoint per-address-type maps keyed by `TypeIndex`: one for
/// addresses that currently hold funds, one for addresses drained to a zero
/// balance. Entries carry a `WithAddressDataSource` wrapper so the flush
/// phase can tell whether each entry must be pushed, updated, or migrated.
pub struct AddressCache {
    /// Addresses with non-zero balance
    funded: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
    /// Addresses that became empty (zero balance)
    empty: AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
}
impl Default for AddressCache {
    /// Equivalent to [`AddressCache::new`]: both maps start empty.
    fn default() -> Self {
        Self::new()
    }
}
impl AddressCache {
    /// Create a cache with no entries in either map.
    pub(crate) fn new() -> Self {
        Self {
            funded: AddressTypeToTypeIndexMap::default(),
            empty: AddressTypeToTypeIndexMap::default(),
        }
    }
    /// Check if address is in cache (either funded or empty).
    #[inline]
    pub(crate) fn contains(&self, address_type: OutputType, type_index: TypeIndex) -> bool {
        self.funded
            .get(address_type)
            .is_some_and(|m| m.contains_key(&type_index))
            || self
                .empty
                .get(address_type)
                .is_some_and(|m| m.contains_key(&type_index))
    }
    /// Merge address data into funded cache.
    ///
    /// Used to fold in address data fetched elsewhere (e.g. in parallel)
    /// before further per-block processing.
    #[inline]
    pub(crate) fn merge_funded(
        &mut self,
        data: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
    ) {
        self.funded.merge_mut(data);
    }
    /// Create an AddressLookup view into this cache.
    ///
    /// The view borrows both maps mutably for the duration of block
    /// processing.
    #[inline]
    pub(crate) fn as_lookup(&mut self) -> AddressLookup<'_> {
        AddressLookup {
            funded: &mut self.funded,
            empty: &mut self.empty,
        }
    }
    /// Update transaction counts for addresses.
    ///
    /// Delegates to the free `update_tx_counts` helper, which looks each
    /// address up in the funded map first, then the empty map.
    pub(crate) fn update_tx_counts(
        &mut self,
        tx_index_vecs: AddressTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
    ) {
        update_tx_counts(&mut self.funded, &mut self.empty, tx_index_vecs);
    }
    /// Take the cache contents for flushing, leaving empty caches.
    ///
    /// Returns `(empty, funded)` — note the order: empty map first,
    /// funded map second.
    pub(crate) fn take(
        &mut self,
    ) -> (
        AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
        AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
    ) {
        (
            std::mem::take(&mut self.empty),
            std::mem::take(&mut self.funded),
        )
    }
}
/// Load address data from storage or create new.
///
/// Returns None if address is already in cache (funded or empty).
#[allow(clippy::too_many_arguments)]
pub(crate) fn load_uncached_address_data(
    address_type: OutputType,
    type_index: TypeIndex,
    first_address_indexes: &ByAddressType<TypeIndex>,
    cache: &AddressCache,
    vr: &VecsReaders,
    any_address_indexes: &AnyAddressIndexesVecs,
    addresses_data: &AddressesDataVecs,
) -> Result<Option<WithAddressDataSource<FundedAddressData>>> {
    // Check if this is a new address (type_index >= first for this height).
    // Such an address was created in this block, so storage holds no data
    // for it yet — start from a default record.
    let first = *first_address_indexes.get(address_type).unwrap();
    if first <= type_index {
        return Ok(Some(WithAddressDataSource::New(
            FundedAddressData::default(),
        )));
    }
    // Skip if already in cache
    if cache.contains(address_type, type_index) {
        return Ok(None);
    }
    // Read from storage: resolve through the per-type index, then fetch the
    // payload from whichever store (funded or empty) currently holds it.
    let reader = vr.address_reader(address_type);
    let any_address_index = any_address_indexes.get(address_type, type_index, reader)?;
    Ok(Some(match any_address_index.to_enum() {
        AnyAddressDataIndexEnum::Funded(funded_index) => {
            let reader = &vr.any_address_index_to_any_address_data.funded;
            let funded_data = addresses_data
                .funded
                .get_any_or_read_at(funded_index.into(), reader)?
                .unwrap();
            WithAddressDataSource::FromFunded(funded_index, funded_data)
        }
        AnyAddressDataIndexEnum::Empty(empty_index) => {
            let reader = &vr.any_address_index_to_any_address_data.empty;
            let empty_data = addresses_data
                .empty
                .get_any_or_read_at(empty_index.into(), reader)?
                .unwrap();
            // Empty record is converted into a funded-shaped record here.
            WithAddressDataSource::FromEmpty(empty_index, empty_data.into())
        }
    }))
}

View File

@@ -1,8 +1,8 @@
use brk_types::{EmptyAddressData, FundedAddressData, OutputType, TypeIndex};
use brk_types::{EmptyAddrData, FundedAddrData, OutputType, TypeIndex};
use crate::distribution::address::AddressTypeToTypeIndexMap;
use crate::distribution::addr::AddrTypeToTypeIndexMap;
use super::super::cohort::WithAddressDataSource;
use super::super::cohort::WithAddrDataSource;
/// Tracking status of an address - determines cohort update strategy.
#[derive(Clone, Copy)]
@@ -16,18 +16,18 @@ pub enum TrackingStatus {
}
/// Context for looking up and storing address data during block processing.
pub struct AddressLookup<'a> {
pub funded: &'a mut AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
pub empty: &'a mut AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
pub struct AddrLookup<'a> {
pub funded: &'a mut AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
pub empty: &'a mut AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
}
impl<'a> AddressLookup<'a> {
impl<'a> AddrLookup<'a> {
pub(crate) fn get_or_create_for_receive(
&mut self,
output_type: OutputType,
type_index: TypeIndex,
) -> (
&mut WithAddressDataSource<FundedAddressData>,
&mut WithAddrDataSource<FundedAddrData>,
TrackingStatus,
) {
use std::collections::hash_map::Entry;
@@ -36,7 +36,7 @@ impl<'a> AddressLookup<'a> {
match map.entry(type_index) {
Entry::Occupied(entry) => {
// Address is in cache. Need to determine if it's been processed
// Addr is in cache. Need to determine if it's been processed
// by process_received (added to a cohort) or just funded this block.
//
// - If wrapper is New AND funded_txo_count == 0: hasn't received yet,
@@ -47,15 +47,15 @@ impl<'a> AddressLookup<'a> {
// - If wrapper is FromEmpty AND utxo_count == 0: still empty → WasEmpty
// - If wrapper is FromEmpty AND utxo_count > 0: already received → Tracked
let status = match entry.get() {
WithAddressDataSource::New(data) => {
WithAddrDataSource::New(data) => {
if data.funded_txo_count == 0 {
TrackingStatus::New
} else {
TrackingStatus::Tracked
}
}
WithAddressDataSource::FromFunded(..) => TrackingStatus::Tracked,
WithAddressDataSource::FromEmpty(_, data) => {
WithAddrDataSource::FromFunded(..) => TrackingStatus::Tracked,
WithAddrDataSource::FromEmpty(_, data) => {
if data.utxo_count() == 0 {
TrackingStatus::WasEmpty
} else {
@@ -72,7 +72,7 @@ impl<'a> AddressLookup<'a> {
return (entry.insert(empty_data.into()), TrackingStatus::WasEmpty);
}
(
entry.insert(WithAddressDataSource::New(FundedAddressData::default())),
entry.insert(WithAddrDataSource::New(FundedAddrData::default())),
TrackingStatus::New,
)
}
@@ -84,12 +84,12 @@ impl<'a> AddressLookup<'a> {
&mut self,
output_type: OutputType,
type_index: TypeIndex,
) -> &mut WithAddressDataSource<FundedAddressData> {
) -> &mut WithAddrDataSource<FundedAddrData> {
self.funded
.get_mut(output_type)
.unwrap()
.get_mut(&type_index)
.expect("Address must exist for send")
.expect("Addr must exist for send")
}
/// Move address from funded to empty set.

View File

@@ -1,5 +1,5 @@
mod address;
mod addr;
mod lookup;
pub use address::*;
pub use addr::*;
pub use lookup::*;

View File

@@ -0,0 +1,152 @@
use brk_error::Result;
use brk_types::{
AnyAddrIndex, EmptyAddrData, EmptyAddrIndex, FundedAddrData, FundedAddrIndex,
OutputType, TypeIndex,
};
use vecdb::AnyVec;
use crate::distribution::{AddrTypeToTypeIndexMap, AddrsDataVecs};
use super::with_source::WithAddrDataSource;
/// Process funded address data updates.
///
/// Handles:
/// - New funded address: push to funded storage
/// - Updated funded address (was funded): update in place
/// - Transition empty -> funded: delete from empty, push to funded
///
/// Returns, per address type, the `AnyAddrIndex` assigned to each pushed
/// address so callers can record its final storage location.
pub(crate) fn process_funded_addrs(
    addrs_data: &mut AddrsDataVecs,
    funded_updates: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
) -> Result<AddrTypeToTypeIndexMap<AnyAddrIndex>> {
    // Bucket every incoming entry by the storage operation it requires.
    let total: usize = funded_updates.iter().map(|(_, m)| m.len()).sum();
    let mut updates: Vec<(FundedAddrIndex, FundedAddrData)> = Vec::with_capacity(total);
    let mut deletes: Vec<EmptyAddrIndex> = Vec::with_capacity(total);
    let mut pushes: Vec<(OutputType, TypeIndex, FundedAddrData)> = Vec::with_capacity(total);
    for (addr_type, items) in funded_updates.into_iter() {
        for (type_index, source) in items {
            match source {
                WithAddrDataSource::New(data) => {
                    pushes.push((addr_type, type_index, data));
                }
                WithAddrDataSource::FromFunded(index, data) => {
                    updates.push((index, data));
                }
                WithAddrDataSource::FromEmpty(empty_index, data) => {
                    // empty -> funded transition: free the old empty slot
                    // and re-insert into funded storage.
                    deletes.push(empty_index);
                    pushes.push((addr_type, type_index, data));
                }
            }
        }
    }
    // Phase 1: Deletes (creates holes)
    for empty_index in deletes {
        addrs_data.empty.delete(empty_index);
    }
    // Phase 2: Updates (in-place)
    for (index, data) in updates {
        addrs_data.funded.update(index, data)?;
    }
    // Phase 3: Pushes (fill holes first, then pure pushes)
    let mut result = AddrTypeToTypeIndexMap::with_capacity(pushes.len() / 4);
    let holes_count = addrs_data.funded.holes().len();
    let mut pushes_iter = pushes.into_iter();
    // Reuse freed funded slots before growing the vec.
    for (addr_type, type_index, data) in pushes_iter.by_ref().take(holes_count) {
        let index = addrs_data.funded.fill_first_hole_or_push(data)?;
        result
            .get_mut(addr_type)
            .unwrap()
            .insert(type_index, AnyAddrIndex::from(index));
    }
    // Pure pushes - no holes remain
    addrs_data.funded.reserve_pushed(pushes_iter.len());
    // Remaining entries append past the current end; indexes are assigned
    // sequentially starting from the current length.
    let mut next_index = addrs_data.funded.len();
    for (addr_type, type_index, data) in pushes_iter {
        addrs_data.funded.push(data);
        result.get_mut(addr_type).unwrap().insert(
            type_index,
            AnyAddrIndex::from(FundedAddrIndex::from(next_index)),
        );
        next_index += 1;
    }
    Ok(result)
}
/// Process empty address data updates.
///
/// Handles:
/// - New empty address: push to empty storage
/// - Updated empty address (was empty): update in place
/// - Transition funded -> empty: delete from funded, push to empty
///
/// Mirror of `process_funded_addrs` with the funded/empty roles swapped.
/// Returns, per address type, the `AnyAddrIndex` assigned to each pushed
/// address.
pub(crate) fn process_empty_addrs(
    addrs_data: &mut AddrsDataVecs,
    empty_updates: AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
) -> Result<AddrTypeToTypeIndexMap<AnyAddrIndex>> {
    // Bucket every incoming entry by the storage operation it requires.
    let total: usize = empty_updates.iter().map(|(_, m)| m.len()).sum();
    let mut updates: Vec<(EmptyAddrIndex, EmptyAddrData)> = Vec::with_capacity(total);
    let mut deletes: Vec<FundedAddrIndex> = Vec::with_capacity(total);
    let mut pushes: Vec<(OutputType, TypeIndex, EmptyAddrData)> = Vec::with_capacity(total);
    for (addr_type, items) in empty_updates.into_iter() {
        for (type_index, source) in items {
            match source {
                WithAddrDataSource::New(data) => {
                    pushes.push((addr_type, type_index, data));
                }
                WithAddrDataSource::FromEmpty(index, data) => {
                    updates.push((index, data));
                }
                WithAddrDataSource::FromFunded(funded_index, data) => {
                    // funded -> empty transition: free the old funded slot
                    // and re-insert into empty storage.
                    deletes.push(funded_index);
                    pushes.push((addr_type, type_index, data));
                }
            }
        }
    }
    // Phase 1: Deletes (creates holes)
    for funded_index in deletes {
        addrs_data.funded.delete(funded_index);
    }
    // Phase 2: Updates (in-place)
    for (index, data) in updates {
        addrs_data.empty.update(index, data)?;
    }
    // Phase 3: Pushes (fill holes first, then pure pushes)
    let mut result = AddrTypeToTypeIndexMap::with_capacity(pushes.len() / 4);
    let holes_count = addrs_data.empty.holes().len();
    let mut pushes_iter = pushes.into_iter();
    // Reuse freed empty slots before growing the vec.
    for (addr_type, type_index, data) in pushes_iter.by_ref().take(holes_count) {
        let index = addrs_data.empty.fill_first_hole_or_push(data)?;
        result
            .get_mut(addr_type)
            .unwrap()
            .insert(type_index, AnyAddrIndex::from(index));
    }
    // Pure pushes - no holes remain
    addrs_data.empty.reserve_pushed(pushes_iter.len());
    // Remaining entries append past the current end; indexes are assigned
    // sequentially starting from the current length.
    let mut next_index = addrs_data.empty.len();
    for (addr_type, type_index, data) in pushes_iter {
        addrs_data.empty.push(data);
        result.get_mut(addr_type).unwrap().insert(
            type_index,
            AnyAddrIndex::from(EmptyAddrIndex::from(next_index)),
        );
        next_index += 1;
    }
    Ok(result)
}

View File

@@ -1,152 +0,0 @@
use brk_error::Result;
use brk_types::{
AnyAddressIndex, EmptyAddressData, EmptyAddressIndex, FundedAddressData, FundedAddressIndex,
OutputType, TypeIndex,
};
use vecdb::AnyVec;
use crate::distribution::{AddressTypeToTypeIndexMap, AddressesDataVecs};
use super::with_source::WithAddressDataSource;
/// Process funded address data updates.
///
/// Handles:
/// - New funded address: push to funded storage
/// - Updated funded address (was funded): update in place
/// - Transition empty -> funded: delete from empty, push to funded
///
/// Returns, per address type, the `AnyAddressIndex` assigned to each pushed
/// address.
pub(crate) fn process_funded_addresses(
    addresses_data: &mut AddressesDataVecs,
    funded_updates: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
) -> Result<AddressTypeToTypeIndexMap<AnyAddressIndex>> {
    // Bucket every incoming entry by the storage operation it requires.
    let total: usize = funded_updates.iter().map(|(_, m)| m.len()).sum();
    let mut updates: Vec<(FundedAddressIndex, FundedAddressData)> = Vec::with_capacity(total);
    let mut deletes: Vec<EmptyAddressIndex> = Vec::with_capacity(total);
    let mut pushes: Vec<(OutputType, TypeIndex, FundedAddressData)> = Vec::with_capacity(total);
    for (address_type, items) in funded_updates.into_iter() {
        for (type_index, source) in items {
            match source {
                WithAddressDataSource::New(data) => {
                    pushes.push((address_type, type_index, data));
                }
                WithAddressDataSource::FromFunded(index, data) => {
                    updates.push((index, data));
                }
                WithAddressDataSource::FromEmpty(empty_index, data) => {
                    // empty -> funded transition: free the old empty slot
                    // and re-insert into funded storage.
                    deletes.push(empty_index);
                    pushes.push((address_type, type_index, data));
                }
            }
        }
    }
    // Phase 1: Deletes (creates holes)
    for empty_index in deletes {
        addresses_data.empty.delete(empty_index);
    }
    // Phase 2: Updates (in-place)
    for (index, data) in updates {
        addresses_data.funded.update(index, data)?;
    }
    // Phase 3: Pushes (fill holes first, then pure pushes)
    let mut result = AddressTypeToTypeIndexMap::with_capacity(pushes.len() / 4);
    let holes_count = addresses_data.funded.holes().len();
    let mut pushes_iter = pushes.into_iter();
    // Reuse freed funded slots before growing the vec.
    for (address_type, type_index, data) in pushes_iter.by_ref().take(holes_count) {
        let index = addresses_data.funded.fill_first_hole_or_push(data)?;
        result
            .get_mut(address_type)
            .unwrap()
            .insert(type_index, AnyAddressIndex::from(index));
    }
    // Pure pushes - no holes remain
    addresses_data.funded.reserve_pushed(pushes_iter.len());
    // Remaining entries append past the current end; indexes are assigned
    // sequentially starting from the current length.
    let mut next_index = addresses_data.funded.len();
    for (address_type, type_index, data) in pushes_iter {
        addresses_data.funded.push(data);
        result.get_mut(address_type).unwrap().insert(
            type_index,
            AnyAddressIndex::from(FundedAddressIndex::from(next_index)),
        );
        next_index += 1;
    }
    Ok(result)
}
/// Process empty address data updates.
///
/// Handles:
/// - New empty address: push to empty storage
/// - Updated empty address (was empty): update in place
/// - Transition funded -> empty: delete from funded, push to empty
///
/// Mirror of `process_funded_addresses` with the funded/empty roles swapped.
pub(crate) fn process_empty_addresses(
    addresses_data: &mut AddressesDataVecs,
    empty_updates: AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
) -> Result<AddressTypeToTypeIndexMap<AnyAddressIndex>> {
    // Bucket every incoming entry by the storage operation it requires.
    let total: usize = empty_updates.iter().map(|(_, m)| m.len()).sum();
    let mut updates: Vec<(EmptyAddressIndex, EmptyAddressData)> = Vec::with_capacity(total);
    let mut deletes: Vec<FundedAddressIndex> = Vec::with_capacity(total);
    let mut pushes: Vec<(OutputType, TypeIndex, EmptyAddressData)> = Vec::with_capacity(total);
    for (address_type, items) in empty_updates.into_iter() {
        for (type_index, source) in items {
            match source {
                WithAddressDataSource::New(data) => {
                    pushes.push((address_type, type_index, data));
                }
                WithAddressDataSource::FromEmpty(index, data) => {
                    updates.push((index, data));
                }
                WithAddressDataSource::FromFunded(funded_index, data) => {
                    // funded -> empty transition: free the old funded slot
                    // and re-insert into empty storage.
                    deletes.push(funded_index);
                    pushes.push((address_type, type_index, data));
                }
            }
        }
    }
    // Phase 1: Deletes (creates holes)
    for funded_index in deletes {
        addresses_data.funded.delete(funded_index);
    }
    // Phase 2: Updates (in-place)
    for (index, data) in updates {
        addresses_data.empty.update(index, data)?;
    }
    // Phase 3: Pushes (fill holes first, then pure pushes)
    let mut result = AddressTypeToTypeIndexMap::with_capacity(pushes.len() / 4);
    let holes_count = addresses_data.empty.holes().len();
    let mut pushes_iter = pushes.into_iter();
    // Reuse freed empty slots before growing the vec.
    for (address_type, type_index, data) in pushes_iter.by_ref().take(holes_count) {
        let index = addresses_data.empty.fill_first_hole_or_push(data)?;
        result
            .get_mut(address_type)
            .unwrap()
            .insert(type_index, AnyAddressIndex::from(index));
    }
    // Pure pushes - no holes remain
    addresses_data.empty.reserve_pushed(pushes_iter.len());
    // Remaining entries append past the current end; indexes are assigned
    // sequentially starting from the current length.
    let mut next_index = addresses_data.empty.len();
    for (address_type, type_index, data) in pushes_iter {
        addresses_data.empty.push(data);
        result.get_mut(address_type).unwrap().insert(
            type_index,
            AnyAddressIndex::from(EmptyAddressIndex::from(next_index)),
        );
        next_index += 1;
    }
    Ok(result)
}

View File

@@ -1,10 +1,10 @@
mod address_updates;
mod addr_updates;
mod received;
mod sent;
mod tx_counts;
mod with_source;
pub(crate) use address_updates::*;
pub(crate) use addr_updates::*;
pub(crate) use received::*;
pub(crate) use sent::*;
pub(crate) use tx_counts::*;

View File

@@ -1,13 +1,13 @@
use brk_cohort::{AmountBucket, ByAddressType};
use brk_cohort::{AmountBucket, ByAddrType};
use brk_types::{Cents, Sats, TypeIndex};
use rustc_hash::FxHashMap;
use crate::distribution::{
address::{AddressTypeToActivityCounts, AddressTypeToVec},
cohorts::AddressCohorts,
addr::{AddrTypeToActivityCounts, AddrTypeToVec},
cohorts::AddrCohorts,
};
use super::super::cache::{AddressLookup, TrackingStatus};
use super::super::cache::{AddrLookup, TrackingStatus};
/// Aggregated receive data for a single address within a block.
#[derive(Default)]
@@ -18,13 +18,13 @@ struct AggregatedReceive {
#[allow(clippy::too_many_arguments)]
pub(crate) fn process_received(
received_data: AddressTypeToVec<(TypeIndex, Sats)>,
cohorts: &mut AddressCohorts,
lookup: &mut AddressLookup<'_>,
received_data: AddrTypeToVec<(TypeIndex, Sats)>,
cohorts: &mut AddrCohorts,
lookup: &mut AddrLookup<'_>,
price: Cents,
address_count: &mut ByAddressType<u64>,
empty_address_count: &mut ByAddressType<u64>,
activity_counts: &mut AddressTypeToActivityCounts,
addr_count: &mut ByAddrType<u64>,
empty_addr_count: &mut ByAddrType<u64>,
activity_counts: &mut AddrTypeToActivityCounts,
) {
let max_type_len = received_data.iter().map(|(_, v)| v.len()).max().unwrap_or(0);
let mut aggregated: FxHashMap<TypeIndex, AggregatedReceive> =
@@ -36,8 +36,8 @@ pub(crate) fn process_received(
}
// Cache mutable refs for this address type
let type_address_count = address_count.get_mut(output_type).unwrap();
let type_empty_count = empty_address_count.get_mut(output_type).unwrap();
let type_addr_count = addr_count.get_mut(output_type).unwrap();
let type_empty_count = empty_addr_count.get_mut(output_type).unwrap();
let type_activity = activity_counts.get_mut_unwrap(output_type);
// Aggregate receives by address - each address processed exactly once
@@ -55,10 +55,10 @@ pub(crate) fn process_received(
match status {
TrackingStatus::New => {
*type_address_count += 1;
*type_addr_count += 1;
}
TrackingStatus::WasEmpty => {
*type_address_count += 1;
*type_addr_count += 1;
*type_empty_count -= 1;
// Reactivated - was empty, now has funds
type_activity.reactivated += 1;
@@ -100,7 +100,7 @@ pub(crate) fn process_received(
"process_received: cohort underflow detected!\n\
output_type={:?}, type_index={:?}\n\
prev_balance={}, new_balance={}, total_value={}\n\
Address: {:?}",
Addr: {:?}",
output_type,
type_index,
prev_balance,

View File

@@ -1,16 +1,16 @@
use brk_cohort::{AmountBucket, ByAddressType};
use brk_cohort::{AmountBucket, ByAddrType};
use brk_error::Result;
use brk_types::{Age, Cents, CheckedSub, Height, Sats, Timestamp, TypeIndex};
use rustc_hash::FxHashSet;
use vecdb::VecIndex;
use crate::distribution::{
address::{AddressTypeToActivityCounts, HeightToAddressTypeToVec},
cohorts::AddressCohorts,
addr::{AddrTypeToActivityCounts, HeightToAddrTypeToVec},
cohorts::AddrCohorts,
compute::PriceRangeMax,
};
use super::super::cache::AddressLookup;
use super::super::cache::AddrLookup;
/// Process sent outputs for address cohorts.
///
@@ -27,20 +27,20 @@ use super::super::cache::AddressLookup;
/// for accurate peak regret calculation.
#[allow(clippy::too_many_arguments)]
pub(crate) fn process_sent(
sent_data: HeightToAddressTypeToVec<(TypeIndex, Sats)>,
cohorts: &mut AddressCohorts,
lookup: &mut AddressLookup<'_>,
sent_data: HeightToAddrTypeToVec<(TypeIndex, Sats)>,
cohorts: &mut AddrCohorts,
lookup: &mut AddrLookup<'_>,
current_price: Cents,
price_range_max: &PriceRangeMax,
address_count: &mut ByAddressType<u64>,
empty_address_count: &mut ByAddressType<u64>,
activity_counts: &mut AddressTypeToActivityCounts,
received_addresses: &ByAddressType<FxHashSet<TypeIndex>>,
addr_count: &mut ByAddrType<u64>,
empty_addr_count: &mut ByAddrType<u64>,
activity_counts: &mut AddrTypeToActivityCounts,
received_addrs: &ByAddrType<FxHashSet<TypeIndex>>,
height_to_price: &[Cents],
height_to_timestamp: &[Timestamp],
current_height: Height,
current_timestamp: Timestamp,
seen_senders: &mut ByAddressType<FxHashSet<TypeIndex>>,
seen_senders: &mut ByAddrType<FxHashSet<TypeIndex>>,
) -> Result<()> {
seen_senders.values_mut().for_each(|set| set.clear());
@@ -54,10 +54,10 @@ pub(crate) fn process_sent(
for (output_type, vec) in by_type.unwrap().into_iter() {
// Cache mutable refs for this address type
let type_address_count = address_count.get_mut(output_type).unwrap();
let type_empty_count = empty_address_count.get_mut(output_type).unwrap();
let type_addr_count = addr_count.get_mut(output_type).unwrap();
let type_empty_count = empty_addr_count.get_mut(output_type).unwrap();
let type_activity = activity_counts.get_mut_unwrap(output_type);
let type_received = received_addresses.get(output_type);
let type_received = received_addrs.get(output_type);
let type_seen = seen_senders.get_mut_unwrap(output_type);
for (type_index, value) in vec {
@@ -99,7 +99,7 @@ pub(crate) fn process_sent(
// Migrate address to new bucket or mark as empty
if will_be_empty {
*type_address_count -= 1;
*type_addr_count -= 1;
*type_empty_count += 1;
lookup.move_to_empty(output_type, type_index);
} else if crossing_boundary {

View File

@@ -1,9 +1,9 @@
use brk_types::{EmptyAddressData, FundedAddressData, TxIndex};
use brk_types::{EmptyAddrData, FundedAddrData, TxIndex};
use smallvec::SmallVec;
use crate::distribution::address::AddressTypeToTypeIndexMap;
use crate::distribution::addr::AddrTypeToTypeIndexMap;
use super::with_source::WithAddressDataSource;
use super::with_source::WithAddrDataSource;
/// Update tx_count for addresses based on unique transactions they participated in.
///
@@ -14,9 +14,9 @@ use super::with_source::WithAddressDataSource;
/// Addresses are looked up in funded_cache first, then empty_cache.
/// NOTE: This should be called AFTER merging parallel-fetched address data into funded_cache.
pub(crate) fn update_tx_counts(
funded_cache: &mut AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
empty_cache: &mut AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
mut tx_index_vecs: AddressTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
funded_cache: &mut AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
empty_cache: &mut AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
mut tx_index_vecs: AddrTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
) {
// First, deduplicate tx_index_vecs for addresses that appear multiple times in a block
for (_, map) in tx_index_vecs.iter_mut() {
@@ -29,20 +29,20 @@ pub(crate) fn update_tx_counts(
}
// Update tx_count on address data
for (address_type, type_index, tx_index_vec) in tx_index_vecs
for (addr_type, type_index, tx_index_vec) in tx_index_vecs
.into_iter()
.flat_map(|(t, m)| m.into_iter().map(move |(i, v)| (t, i, v)))
{
let tx_count = tx_index_vec.len() as u32;
if let Some(addr_data) = funded_cache
.get_mut(address_type)
.get_mut(addr_type)
.unwrap()
.get_mut(&type_index)
{
addr_data.tx_count += tx_count;
} else if let Some(addr_data) = empty_cache
.get_mut(address_type)
.get_mut(addr_type)
.unwrap()
.get_mut(&type_index)
{

View File

@@ -1,20 +1,20 @@
use brk_types::{EmptyAddressData, EmptyAddressIndex, FundedAddressData, FundedAddressIndex};
use brk_types::{EmptyAddrData, EmptyAddrIndex, FundedAddrData, FundedAddrIndex};
/// Address data wrapped with its source location for flush operations.
///
/// This enum tracks where the data came from so it can be correctly
/// updated or created during the flush phase.
#[derive(Debug, Clone)]
pub enum WithAddressDataSource<T> {
pub enum WithAddrDataSource<T> {
/// Brand new address (never seen before)
New(T),
/// Funded from funded address storage (with original index)
FromFunded(FundedAddressIndex, T),
FromFunded(FundedAddrIndex, T),
/// Funded from empty address storage (with original index)
FromEmpty(EmptyAddressIndex, T),
FromEmpty(EmptyAddrIndex, T),
}
impl<T> std::ops::Deref for WithAddressDataSource<T> {
impl<T> std::ops::Deref for WithAddrDataSource<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@@ -24,7 +24,7 @@ impl<T> std::ops::Deref for WithAddressDataSource<T> {
}
}
impl<T> std::ops::DerefMut for WithAddressDataSource<T> {
impl<T> std::ops::DerefMut for WithAddrDataSource<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
match self {
Self::New(v) | Self::FromFunded(_, v) | Self::FromEmpty(_, v) => v,
@@ -32,24 +32,24 @@ impl<T> std::ops::DerefMut for WithAddressDataSource<T> {
}
}
impl From<WithAddressDataSource<EmptyAddressData>> for WithAddressDataSource<FundedAddressData> {
impl From<WithAddrDataSource<EmptyAddrData>> for WithAddrDataSource<FundedAddrData> {
#[inline]
fn from(value: WithAddressDataSource<EmptyAddressData>) -> Self {
fn from(value: WithAddrDataSource<EmptyAddrData>) -> Self {
match value {
WithAddressDataSource::New(v) => Self::New(v.into()),
WithAddressDataSource::FromFunded(i, v) => Self::FromFunded(i, v.into()),
WithAddressDataSource::FromEmpty(i, v) => Self::FromEmpty(i, v.into()),
WithAddrDataSource::New(v) => Self::New(v.into()),
WithAddrDataSource::FromFunded(i, v) => Self::FromFunded(i, v.into()),
WithAddrDataSource::FromEmpty(i, v) => Self::FromEmpty(i, v.into()),
}
}
}
impl From<WithAddressDataSource<FundedAddressData>> for WithAddressDataSource<EmptyAddressData> {
impl From<WithAddrDataSource<FundedAddrData>> for WithAddrDataSource<EmptyAddrData> {
#[inline]
fn from(value: WithAddressDataSource<FundedAddressData>) -> Self {
fn from(value: WithAddrDataSource<FundedAddrData>) -> Self {
match value {
WithAddressDataSource::New(v) => Self::New(v.into()),
WithAddressDataSource::FromFunded(i, v) => Self::FromFunded(i, v.into()),
WithAddressDataSource::FromEmpty(i, v) => Self::FromEmpty(i, v.into()),
WithAddrDataSource::New(v) => Self::New(v.into()),
WithAddrDataSource::FromFunded(i, v) => Self::FromFunded(i, v.into()),
WithAddrDataSource::FromEmpty(i, v) => Self::FromEmpty(i, v.into()),
}
}
}

View File

@@ -1,21 +1,21 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{FundedAddressData, Height, OutputType, Sats, TxIndex, TypeIndex};
use brk_types::{FundedAddrData, Height, OutputType, Sats, TxIndex, TypeIndex};
use rayon::prelude::*;
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use crate::distribution::{
address::{AddressTypeToTypeIndexMap, AddressesDataVecs, AnyAddressIndexesVecs},
addr::{AddrTypeToTypeIndexMap, AddrsDataVecs, AnyAddrIndexesVecs},
compute::VecsReaders,
state::Transacted,
};
use crate::distribution::address::HeightToAddressTypeToVec;
use crate::distribution::addr::HeightToAddrTypeToVec;
use super::super::{
cache::{AddressCache, load_uncached_address_data},
cohort::WithAddressDataSource,
cache::{AddrCache, load_uncached_addr_data},
cohort::WithAddrDataSource,
};
/// Result of processing inputs for a block.
@@ -23,11 +23,11 @@ pub struct InputsResult {
/// Map from UTXO creation height -> aggregated sent supply.
pub height_to_sent: FxHashMap<Height, Transacted>,
/// Per-height, per-address-type sent data: (type_index, value) for each address.
pub sent_data: HeightToAddressTypeToVec<(TypeIndex, Sats)>,
/// Address data looked up during processing, keyed by (address_type, type_index).
pub address_data: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
pub sent_data: HeightToAddrTypeToVec<(TypeIndex, Sats)>,
/// Address data looked up during processing, keyed by (addr_type, type_index).
pub addr_data: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
/// Transaction indexes per address for tx_count tracking.
pub tx_index_vecs: AddressTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
pub tx_index_vecs: AddrTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
}
/// Process inputs (spent UTXOs) for a block.
@@ -51,11 +51,11 @@ pub(crate) fn process_inputs(
txin_index_to_output_type: &[OutputType],
txin_index_to_type_index: &[TypeIndex],
txin_index_to_prev_height: &[Height],
first_address_indexes: &ByAddressType<TypeIndex>,
cache: &AddressCache,
first_addr_indexes: &ByAddrType<TypeIndex>,
cache: &AddrCache,
vr: &VecsReaders,
any_address_indexes: &AnyAddressIndexesVecs,
addresses_data: &AddressesDataVecs,
any_addr_indexes: &AnyAddrIndexesVecs,
addrs_data: &AddrsDataVecs,
) -> Result<InputsResult> {
let map_fn = |local_idx: usize| -> Result<_> {
let tx_index = txin_index_to_tx_index[local_idx];
@@ -64,21 +64,21 @@ pub(crate) fn process_inputs(
let value = txin_index_to_value[local_idx];
let input_type = txin_index_to_output_type[local_idx];
if input_type.is_not_address() {
if input_type.is_not_addr() {
return Ok((prev_height, value, input_type, None));
}
let type_index = txin_index_to_type_index[local_idx];
// Look up address data
let addr_data_opt = load_uncached_address_data(
let addr_data_opt = load_uncached_addr_data(
input_type,
type_index,
first_address_indexes,
first_addr_indexes,
cache,
vr,
any_address_indexes,
addresses_data,
any_addr_indexes,
addrs_data,
)?;
Ok((
@@ -108,13 +108,13 @@ pub(crate) fn process_inputs(
estimated_unique_heights,
Default::default(),
);
let mut sent_data = HeightToAddressTypeToVec::with_capacity(estimated_unique_heights);
let mut address_data =
AddressTypeToTypeIndexMap::<WithAddressDataSource<FundedAddressData>>::with_capacity(
let mut sent_data = HeightToAddrTypeToVec::with_capacity(estimated_unique_heights);
let mut addr_data =
AddrTypeToTypeIndexMap::<WithAddrDataSource<FundedAddrData>>::with_capacity(
estimated_per_type,
);
let mut tx_index_vecs =
AddressTypeToTypeIndexMap::<SmallVec<[TxIndex; 4]>>::with_capacity(estimated_per_type);
AddrTypeToTypeIndexMap::<SmallVec<[TxIndex; 4]>>::with_capacity(estimated_per_type);
for (prev_height, value, output_type, addr_info) in items {
height_to_sent
@@ -130,8 +130,8 @@ pub(crate) fn process_inputs(
.unwrap()
.push((type_index, value));
if let Some(addr_data) = addr_data_opt {
address_data.insert_for_type(output_type, type_index, addr_data);
if let Some(source) = addr_data_opt {
addr_data.insert_for_type(output_type, type_index, source);
}
tx_index_vecs
@@ -146,7 +146,7 @@ pub(crate) fn process_inputs(
Ok(InputsResult {
height_to_sent,
sent_data,
address_data,
addr_data,
tx_index_vecs,
})
}

View File

@@ -1,20 +1,20 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{FundedAddressData, Sats, TxIndex, TypeIndex};
use brk_types::{FundedAddrData, Sats, TxIndex, TypeIndex};
use rayon::prelude::*;
use smallvec::SmallVec;
use crate::distribution::{
address::{
AddressTypeToTypeIndexMap, AddressTypeToVec, AddressesDataVecs, AnyAddressIndexesVecs,
addr::{
AddrTypeToTypeIndexMap, AddrTypeToVec, AddrsDataVecs, AnyAddrIndexesVecs,
},
compute::{TxOutData, VecsReaders},
state::Transacted,
};
use super::super::{
cache::{AddressCache, load_uncached_address_data},
cohort::WithAddressDataSource,
cache::{AddrCache, load_uncached_addr_data},
cohort::WithAddrDataSource,
};
/// Result of processing outputs for a block.
@@ -22,11 +22,11 @@ pub struct OutputsResult {
/// Aggregated supply transacted in this block.
pub transacted: Transacted,
/// Per-address-type received data: (type_index, value) for each address.
pub received_data: AddressTypeToVec<(TypeIndex, Sats)>,
/// Address data looked up during processing, keyed by (address_type, type_index).
pub address_data: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
pub received_data: AddrTypeToVec<(TypeIndex, Sats)>,
/// Address data looked up during processing, keyed by (addr_type, type_index).
pub addr_data: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
/// Transaction indexes per address for tx_count tracking.
pub tx_index_vecs: AddressTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
pub tx_index_vecs: AddrTypeToTypeIndexMap<SmallVec<[TxIndex; 4]>>,
}
/// Process outputs (new UTXOs) for a block.
@@ -40,35 +40,35 @@ pub struct OutputsResult {
pub(crate) fn process_outputs(
txout_index_to_tx_index: &[TxIndex],
txout_data_vec: &[TxOutData],
first_address_indexes: &ByAddressType<TypeIndex>,
cache: &AddressCache,
first_addr_indexes: &ByAddrType<TypeIndex>,
cache: &AddrCache,
vr: &VecsReaders,
any_address_indexes: &AnyAddressIndexesVecs,
addresses_data: &AddressesDataVecs,
any_addr_indexes: &AnyAddrIndexesVecs,
addrs_data: &AddrsDataVecs,
) -> Result<OutputsResult> {
let output_count = txout_data_vec.len();
// Phase 1: Address lookups (mmap reads) — parallel for large blocks, sequential for small
// Phase 1: Addr lookups (mmap reads) — parallel for large blocks, sequential for small
let map_fn = |local_idx: usize| -> Result<_> {
let txout_data = &txout_data_vec[local_idx];
let value = txout_data.value;
let output_type = txout_data.output_type;
if output_type.is_not_address() {
if output_type.is_not_addr() {
return Ok((value, output_type, None));
}
let type_index = txout_data.type_index;
let tx_index = txout_index_to_tx_index[local_idx];
let addr_data_opt = load_uncached_address_data(
let addr_data_opt = load_uncached_addr_data(
output_type,
type_index,
first_address_indexes,
first_addr_indexes,
cache,
vr,
any_address_indexes,
addresses_data,
any_addr_indexes,
addrs_data,
)?;
Ok((
@@ -92,13 +92,13 @@ pub(crate) fn process_outputs(
// Phase 2: Sequential accumulation
let estimated_per_type = (output_count / 8).max(8);
let mut transacted = Transacted::default();
let mut received_data = AddressTypeToVec::with_capacity(estimated_per_type);
let mut address_data =
AddressTypeToTypeIndexMap::<WithAddressDataSource<FundedAddressData>>::with_capacity(
let mut received_data = AddrTypeToVec::with_capacity(estimated_per_type);
let mut addr_data =
AddrTypeToTypeIndexMap::<WithAddrDataSource<FundedAddrData>>::with_capacity(
estimated_per_type,
);
let mut tx_index_vecs =
AddressTypeToTypeIndexMap::<SmallVec<[TxIndex; 4]>>::with_capacity(estimated_per_type);
AddrTypeToTypeIndexMap::<SmallVec<[TxIndex; 4]>>::with_capacity(estimated_per_type);
for (value, output_type, addr_info) in items {
transacted.iterate(value, output_type);
@@ -109,8 +109,8 @@ pub(crate) fn process_outputs(
.unwrap()
.push((type_index, value));
if let Some(addr_data) = addr_data_opt {
address_data.insert_for_type(output_type, type_index, addr_data);
if let Some(source) = addr_data_opt {
addr_data.insert_for_type(output_type, type_index, source);
}
tx_index_vecs
@@ -125,7 +125,7 @@ pub(crate) fn process_outputs(
Ok(OutputsResult {
transacted,
received_data,
address_data,
addr_data,
tx_index_vecs,
})
}

View File

@@ -1,7 +1,7 @@
use std::path::Path;
use brk_cohort::{
AddressGroups, AmountRange, OverAmount, UnderAmount, Filter, Filtered,
AddrGroups, AmountRange, OverAmount, UnderAmount, Filter, Filtered,
};
use brk_error::Result;
use brk_traversable::Traversable;
@@ -12,16 +12,16 @@ use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{distribution::DynCohortVecs, indexes, internal::CachedWindowStarts, prices};
use super::{super::traits::CohortVecs, vecs::AddressCohortVecs};
use super::{super::traits::CohortVecs, vecs::AddrCohortVecs};
const VERSION: Version = Version::new(0);
/// All Address cohorts organized by filter type.
/// All Addr cohorts organized by filter type.
#[derive(Deref, DerefMut, Traversable)]
pub struct AddressCohorts<M: StorageMode = Rw>(AddressGroups<AddressCohortVecs<M>>);
pub struct AddrCohorts<M: StorageMode = Rw>(AddrGroups<AddrCohortVecs<M>>);
impl AddressCohorts {
/// Import all Address cohorts from database.
impl AddrCohorts {
/// Import all Addr cohorts from database.
pub(crate) fn forced_import(
db: &Database,
version: Version,
@@ -33,15 +33,15 @@ impl AddressCohorts {
// Helper to create a cohort - only amount_range cohorts have state
let create =
|filter: Filter, name: &'static str, has_state: bool| -> Result<AddressCohortVecs> {
|filter: Filter, name: &'static str, has_state: bool| -> Result<AddrCohortVecs> {
let sp = if has_state { Some(states_path) } else { None };
AddressCohortVecs::forced_import(db, filter, name, v, indexes, sp, cached_starts)
AddrCohortVecs::forced_import(db, filter, name, v, indexes, sp, cached_starts)
};
let full = |f: Filter, name: &'static str| create(f, name, true);
let none = |f: Filter, name: &'static str| create(f, name, false);
Ok(Self(AddressGroups {
Ok(Self(AddrGroups {
amount_range: AmountRange::try_new(&full)?,
under_amount: UnderAmount::try_new(&none)?,
over_amount: OverAmount::try_new(&none)?,
@@ -51,7 +51,7 @@ impl AddressCohorts {
/// Apply a function to each aggregate cohort with its source cohorts (in parallel).
fn for_each_aggregate<F>(&mut self, f: F) -> Result<()>
where
F: Fn(&mut AddressCohortVecs, Vec<&AddressCohortVecs>) -> Result<()> + Sync,
F: Fn(&mut AddrCohortVecs, Vec<&AddrCohortVecs>) -> Result<()> + Sync,
{
let by_amount_range = &self.0.amount_range;

View File

@@ -8,7 +8,7 @@ use rayon::prelude::*;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, ReadableVec, Rw, StorageMode, WritableVec};
use crate::{
distribution::state::{AddressCohortState, MinimalRealizedState},
distribution::state::{AddrCohortState, MinimalRealizedState},
indexes,
internal::{CachedWindowStarts, PerBlockWithDeltas},
prices,
@@ -18,19 +18,19 @@ use crate::distribution::metrics::{ImportConfig, MinimalCohortMetrics};
use super::super::traits::{CohortVecs, DynCohortVecs};
#[derive(Traversable)]
pub struct AddressCohortVecs<M: StorageMode = Rw> {
pub struct AddrCohortVecs<M: StorageMode = Rw> {
starting_height: Option<Height>,
#[traversable(skip)]
pub state: Option<Box<AddressCohortState<MinimalRealizedState>>>,
pub state: Option<Box<AddrCohortState<MinimalRealizedState>>>,
#[traversable(flatten)]
pub metrics: MinimalCohortMetrics<M>,
pub address_count: PerBlockWithDeltas<StoredU64, StoredI64, BasisPointsSigned32, M>,
pub addr_count: PerBlockWithDeltas<StoredU64, StoredI64, BasisPointsSigned32, M>,
}
impl AddressCohortVecs {
impl AddrCohortVecs {
pub(crate) fn forced_import(
db: &Database,
filter: Filter,
@@ -40,7 +40,7 @@ impl AddressCohortVecs {
states_path: Option<&Path>,
cached_starts: &CachedWindowStarts,
) -> Result<Self> {
let full_name = CohortContext::Address.full_name(&filter, name);
let full_name = CohortContext::Addr.full_name(&filter, name);
let cfg = ImportConfig {
db,
@@ -51,9 +51,9 @@ impl AddressCohortVecs {
cached_starts,
};
let address_count = PerBlockWithDeltas::forced_import(
let addr_count = PerBlockWithDeltas::forced_import(
db,
&cfg.name("address_count"),
&cfg.name("addr_count"),
version,
Version::ONE,
indexes,
@@ -62,9 +62,9 @@ impl AddressCohortVecs {
Ok(Self {
starting_height: None,
state: states_path.map(|path| Box::new(AddressCohortState::new(path, &full_name))),
state: states_path.map(|path| Box::new(AddrCohortState::new(path, &full_name))),
metrics: MinimalCohortMetrics::forced_import(&cfg)?,
address_count,
addr_count,
})
}
@@ -76,7 +76,7 @@ impl AddressCohortVecs {
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
let mut vecs: Vec<&mut dyn AnyStoredVec> = Vec::new();
vecs.push(&mut self.address_count.height as &mut dyn AnyStoredVec);
vecs.push(&mut self.addr_count.height as &mut dyn AnyStoredVec);
vecs.extend(self.metrics.collect_all_vecs_mut());
vecs.into_par_iter()
}
@@ -89,15 +89,15 @@ impl AddressCohortVecs {
}
}
impl Filtered for AddressCohortVecs {
impl Filtered for AddrCohortVecs {
fn filter(&self) -> &Filter {
&self.metrics.filter
}
}
impl DynCohortVecs for AddressCohortVecs {
impl DynCohortVecs for AddrCohortVecs {
fn min_stateful_len(&self) -> usize {
self.address_count
self.addr_count
.height
.len()
.min(self.metrics.min_stateful_len())
@@ -130,7 +130,7 @@ impl DynCohortVecs for AddressCohortVecs {
.height
.collect_one(prev_height)
.unwrap();
state.address_count = *self.address_count.height.collect_one(prev_height).unwrap();
state.addr_count = *self.addr_count.height.collect_one(prev_height).unwrap();
state.inner.restore_realized_cap();
@@ -149,7 +149,7 @@ impl DynCohortVecs for AddressCohortVecs {
fn validate_computed_versions(&mut self, base_version: Version) -> Result<()> {
use vecdb::WritableVec;
self.address_count
self.addr_count
.height
.validate_computed_version_or_reset(base_version)?;
Ok(())
@@ -161,9 +161,9 @@ impl DynCohortVecs for AddressCohortVecs {
}
if let Some(state) = self.state.as_ref() {
self.address_count
self.addr_count
.height
.push(state.address_count.into());
.push(state.addr_count.into());
self.metrics.supply.push_state(&state.inner);
self.metrics.outputs.push_state(&state.inner);
self.metrics.realized.push_state(&state.inner);
@@ -203,18 +203,18 @@ impl DynCohortVecs for AddressCohortVecs {
}
}
impl CohortVecs for AddressCohortVecs {
impl CohortVecs for AddrCohortVecs {
fn compute_from_stateful(
&mut self,
starting_indexes: &Indexes,
others: &[&Self],
exit: &Exit,
) -> Result<()> {
self.address_count.height.compute_sum_of_others(
self.addr_count.height.compute_sum_of_others(
starting_indexes.height,
others
.iter()
.map(|v| &v.address_count.height)
.map(|v| &v.addr_count.height)
.collect::<Vec<_>>()
.as_slice(),
exit,

View File

@@ -1,7 +1,7 @@
mod address;
mod addr;
mod traits;
mod utxo;
pub use address::AddressCohorts;
pub use addr::AddrCohorts;
pub use traits::DynCohortVecs;
pub use utxo::UTXOCohorts;

View File

@@ -1,4 +1,4 @@
use brk_cohort::ByAddressType;
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::{
@@ -11,12 +11,12 @@ use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use crate::{
distribution::{
address::{AddressTypeToActivityCounts, AddressTypeToAddressCount},
addr::{AddrTypeToActivityCounts, AddrTypeToAddrCount},
block::{
AddressCache, InputsResult, process_inputs, process_outputs, process_received,
AddrCache, InputsResult, process_inputs, process_outputs, process_received,
process_sent,
},
compute::write::{process_address_updates, write},
compute::write::{process_addr_updates, write},
state::{BlockState, Transacted},
},
indexes, inputs, outputs, transactions,
@@ -25,7 +25,7 @@ use crate::{
use super::{
super::{
RangeMap,
cohorts::{AddressCohorts, DynCohortVecs, UTXOCohorts},
cohorts::{AddrCohorts, DynCohortVecs, UTXOCohorts},
vecs::Vecs,
},
BIP30_DUPLICATE_HEIGHT_1, BIP30_DUPLICATE_HEIGHT_2, BIP30_ORIGINAL_HEIGHT_1,
@@ -103,7 +103,7 @@ pub(crate) fn process_blocks(
.collect();
debug!("creating VecsReaders");
let mut vr = VecsReaders::new(&vecs.any_address_indexes, &vecs.addresses_data);
let mut vr = VecsReaders::new(&vecs.any_addr_indexes, &vecs.addrs_data);
debug!("VecsReaders created");
// Extend tx_index_to_height RangeMap with new entries (incremental, O(new_blocks))
@@ -143,69 +143,69 @@ pub(crate) fn process_blocks(
// Pre-collect first address indexes per type for the block range
let first_p2a_vec = indexer
.vecs
.addresses
.addrs
.p2a.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pk33_vec = indexer
.vecs
.addresses
.addrs
.p2pk33.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pk65_vec = indexer
.vecs
.addresses
.addrs
.p2pk65.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pkh_vec = indexer
.vecs
.addresses
.addrs
.p2pkh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2sh_vec = indexer
.vecs
.addresses
.addrs
.p2sh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2tr_vec = indexer
.vecs
.addresses
.addrs
.p2tr.first_index
.collect_range_at(start_usize, end_usize);
let first_p2wpkh_vec = indexer
.vecs
.addresses
.addrs
.p2wpkh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2wsh_vec = indexer
.vecs
.addresses
.addrs
.p2wsh.first_index
.collect_range_at(start_usize, end_usize);
// Track running totals - recover from previous height if resuming
debug!("recovering address_counts from height {}", starting_height);
let (mut address_counts, mut empty_address_counts) = if starting_height > Height::ZERO {
let address_counts =
AddressTypeToAddressCount::from((&vecs.addresses.funded.by_address_type, starting_height));
let empty_address_counts = AddressTypeToAddressCount::from((
&vecs.addresses.empty.by_address_type,
debug!("recovering addr_counts from height {}", starting_height);
let (mut addr_counts, mut empty_addr_counts) = if starting_height > Height::ZERO {
let addr_counts =
AddrTypeToAddrCount::from((&vecs.addrs.funded.by_addr_type, starting_height));
let empty_addr_counts = AddrTypeToAddrCount::from((
&vecs.addrs.empty.by_addr_type,
starting_height,
));
(address_counts, empty_address_counts)
(addr_counts, empty_addr_counts)
} else {
(
AddressTypeToAddressCount::default(),
AddressTypeToAddressCount::default(),
AddrTypeToAddrCount::default(),
AddrTypeToAddrCount::default(),
)
};
debug!("address_counts recovered");
debug!("addr_counts recovered");
// Track activity counts - reset each block
let mut activity_counts = AddressTypeToActivityCounts::default();
let mut activity_counts = AddrTypeToActivityCounts::default();
debug!("creating AddressCache");
let mut cache = AddressCache::new();
debug!("AddressCache created, entering main loop");
debug!("creating AddrCache");
let mut cache = AddrCache::new();
debug!("AddrCache created, entering main loop");
// Initialize Fenwick tree from imported BTreeMap state (one-time)
vecs.utxo_cohorts.init_fenwick_if_needed();
@@ -216,10 +216,10 @@ pub(crate) fn process_blocks(
let start = starting_height.to_usize();
vecs.utxo_cohorts
.par_iter_vecs_mut()
.chain(vecs.address_cohorts.par_iter_vecs_mut())
.chain(vecs.addresses.funded.par_iter_height_mut())
.chain(vecs.addresses.empty.par_iter_height_mut())
.chain(vecs.addresses.activity.par_iter_height_mut())
.chain(vecs.addr_cohorts.par_iter_vecs_mut())
.chain(vecs.addrs.funded.par_iter_height_mut())
.chain(vecs.addrs.empty.par_iter_height_mut())
.chain(vecs.addrs.activity.par_iter_height_mut())
.chain(rayon::iter::once(
&mut vecs.coinblocks_destroyed.base.height as &mut dyn AnyStoredVec,
))
@@ -227,8 +227,8 @@ pub(crate) fn process_blocks(
}
// Reusable hashsets (avoid per-block allocation)
let mut received_addresses = ByAddressType::<FxHashSet<TypeIndex>>::default();
let mut seen_senders = ByAddressType::<FxHashSet<TypeIndex>>::default();
let mut received_addrs = ByAddrType::<FxHashSet<TypeIndex>>::default();
let mut seen_senders = ByAddrType::<FxHashSet<TypeIndex>>::default();
// Track earliest chain_state modification from sends (for incremental supply_state writes)
let mut min_supply_modified: Option<Height> = None;
@@ -255,7 +255,7 @@ pub(crate) fn process_blocks(
debug_assert_eq!(ctx.price_at(height), block_price);
// Get first address indexes for this height from pre-collected vecs
let first_address_indexes = ByAddressType {
let first_addr_indexes = ByAddrType {
p2a: TypeIndex::from(first_p2a_vec[offset].to_usize()),
p2pk33: TypeIndex::from(first_p2pk33_vec[offset].to_usize()),
p2pk65: TypeIndex::from(first_p2pk65_vec[offset].to_usize()),
@@ -285,11 +285,11 @@ pub(crate) fn process_blocks(
process_outputs(
txout_index_to_tx_index,
txout_data_vec,
&first_address_indexes,
&first_addr_indexes,
&cache,
&vr,
&vecs.any_address_indexes,
&vecs.addresses_data,
&vecs.any_addr_indexes,
&vecs.addrs_data,
)
},
|| -> Result<_> {
@@ -309,17 +309,17 @@ pub(crate) fn process_blocks(
input_output_types,
input_type_indexes,
input_prev_heights,
&first_address_indexes,
&first_addr_indexes,
&cache,
&vr,
&vecs.any_address_indexes,
&vecs.addresses_data,
&vecs.any_addr_indexes,
&vecs.addrs_data,
)
} else {
Ok(InputsResult {
height_to_sent: Default::default(),
sent_data: Default::default(),
address_data: Default::default(),
addr_data: Default::default(),
tx_index_vecs: Default::default(),
})
}
@@ -331,8 +331,8 @@ pub(crate) fn process_blocks(
let (outputs_result, inputs_result) = oi_result?;
// Merge new address data into current cache
cache.merge_funded(outputs_result.address_data);
cache.merge_funded(inputs_result.address_data);
cache.merge_funded(outputs_result.addr_data);
cache.merge_funded(inputs_result.addr_data);
// Combine tx_index_vecs from outputs and inputs, then update tx_count
let combined_tx_index_vecs = outputs_result
@@ -390,15 +390,15 @@ pub(crate) fn process_blocks(
// Build set of addresses that received this block (for detecting "both" in sent)
// Reuse pre-allocated hashsets: clear preserves capacity, avoiding reallocation
received_addresses.values_mut().for_each(|set| set.clear());
received_addrs.values_mut().for_each(|set| set.clear());
for (output_type, vec) in outputs_result.received_data.iter() {
let set = received_addresses.get_mut_unwrap(output_type);
let set = received_addrs.get_mut_unwrap(output_type);
for (type_index, _) in vec {
set.insert(*type_index);
}
}
// Process UTXO cohorts and Address cohorts in parallel
// Process UTXO cohorts and Addr cohorts in parallel
let (_, addr_result) = rayon::join(
|| {
// UTXO cohorts receive/send
@@ -418,25 +418,25 @@ pub(crate) fn process_blocks(
// Process received outputs (addresses receiving funds)
process_received(
outputs_result.received_data,
&mut vecs.address_cohorts,
&mut vecs.addr_cohorts,
&mut lookup,
block_price,
&mut address_counts,
&mut empty_address_counts,
&mut addr_counts,
&mut empty_addr_counts,
&mut activity_counts,
);
// Process sent inputs (addresses sending funds)
process_sent(
inputs_result.sent_data,
&mut vecs.address_cohorts,
&mut vecs.addr_cohorts,
&mut lookup,
block_price,
ctx.price_range_max,
&mut address_counts,
&mut empty_address_counts,
&mut addr_counts,
&mut empty_addr_counts,
&mut activity_counts,
&received_addresses,
&received_addrs,
height_to_price_vec,
height_to_timestamp_vec,
height,
@@ -451,18 +451,18 @@ pub(crate) fn process_blocks(
vecs.utxo_cohorts.update_fenwick_from_pending();
// Push to height-indexed vectors
vecs.addresses.funded
.push_height(address_counts.sum(), &address_counts);
vecs.addresses.empty
.push_height(empty_address_counts.sum(), &empty_address_counts);
vecs.addresses.activity.push_height(&activity_counts);
vecs.addrs.funded
.push_height(addr_counts.sum(), &addr_counts);
vecs.addrs.empty
.push_height(empty_addr_counts.sum(), &empty_addr_counts);
vecs.addrs.activity.push_height(&activity_counts);
let is_last_of_day = is_last_of_day[offset];
let date_opt = is_last_of_day.then(|| Date::from(timestamp));
push_cohort_states(
&mut vecs.utxo_cohorts,
&mut vecs.address_cohorts,
&mut vecs.addr_cohorts,
height,
block_price,
);
@@ -484,9 +484,9 @@ pub(crate) fn process_blocks(
let (empty_updates, funded_updates) = cache.take();
// Process address updates (mutations)
process_address_updates(
&mut vecs.addresses_data,
&mut vecs.any_address_indexes,
process_addr_updates(
&mut vecs.addrs_data,
&mut vecs.any_addr_indexes,
empty_updates,
funded_updates,
)?;
@@ -499,7 +499,7 @@ pub(crate) fn process_blocks(
vecs.flush()?;
// Recreate readers
vr = VecsReaders::new(&vecs.any_address_indexes, &vecs.addresses_data);
vr = VecsReaders::new(&vecs.any_addr_indexes, &vecs.addrs_data);
}
}
@@ -511,9 +511,9 @@ pub(crate) fn process_blocks(
let (empty_updates, funded_updates) = cache.take();
// Process address updates (mutations)
process_address_updates(
&mut vecs.addresses_data,
&mut vecs.any_address_indexes,
process_addr_updates(
&mut vecs.addrs_data,
&mut vecs.any_addr_indexes,
empty_updates,
funded_updates,
)?;
@@ -527,7 +527,7 @@ pub(crate) fn process_blocks(
/// Push cohort states to height-indexed vectors, then reset per-block values.
fn push_cohort_states(
utxo_cohorts: &mut UTXOCohorts,
address_cohorts: &mut AddressCohorts,
addr_cohorts: &mut AddrCohorts,
height: Height,
height_price: Cents,
) {
@@ -542,7 +542,7 @@ fn push_cohort_states(
})
},
|| {
address_cohorts
addr_cohorts
.par_iter_separate_mut()
.for_each(|v| {
v.push_state(height);
@@ -558,7 +558,7 @@ fn push_cohort_states(
utxo_cohorts
.iter_separate_mut()
.for_each(|v| v.reset_single_iteration_values());
address_cohorts
addr_cohorts
.iter_separate_mut()
.for_each(|v| v.reset_single_iteration_values());
}

View File

@@ -1,4 +1,4 @@
use brk_cohort::{ByAddressType, ByAnyAddress};
use brk_cohort::{ByAddrType, ByAnyAddr};
use brk_indexer::Indexer;
use brk_types::{Height, OutPoint, OutputType, Sats, StoredU64, TxIndex, TypeIndex};
use vecdb::{ReadableVec, Reader, VecIndex};
@@ -6,7 +6,7 @@ use vecdb::{ReadableVec, Reader, VecIndex};
use crate::{
distribution::{
RangeMap,
address::{AddressesDataVecs, AnyAddressIndexesVecs},
addr::{AddrsDataVecs, AnyAddrIndexesVecs},
},
inputs,
};
@@ -161,37 +161,37 @@ impl<'a> TxInReaders<'a> {
/// Cached readers for stateful vectors.
pub struct VecsReaders {
pub address_type_index_to_any_address_index: ByAddressType<Reader>,
pub any_address_index_to_any_address_data: ByAnyAddress<Reader>,
pub addr_type_index_to_any_addr_index: ByAddrType<Reader>,
pub any_addr_index_to_any_addr_data: ByAnyAddr<Reader>,
}
impl VecsReaders {
pub(crate) fn new(
any_address_indexes: &AnyAddressIndexesVecs,
addresses_data: &AddressesDataVecs,
any_addr_indexes: &AnyAddrIndexesVecs,
addrs_data: &AddrsDataVecs,
) -> Self {
Self {
address_type_index_to_any_address_index: ByAddressType {
p2a: any_address_indexes.p2a.create_reader(),
p2pk33: any_address_indexes.p2pk33.create_reader(),
p2pk65: any_address_indexes.p2pk65.create_reader(),
p2pkh: any_address_indexes.p2pkh.create_reader(),
p2sh: any_address_indexes.p2sh.create_reader(),
p2tr: any_address_indexes.p2tr.create_reader(),
p2wpkh: any_address_indexes.p2wpkh.create_reader(),
p2wsh: any_address_indexes.p2wsh.create_reader(),
addr_type_index_to_any_addr_index: ByAddrType {
p2a: any_addr_indexes.p2a.create_reader(),
p2pk33: any_addr_indexes.p2pk33.create_reader(),
p2pk65: any_addr_indexes.p2pk65.create_reader(),
p2pkh: any_addr_indexes.p2pkh.create_reader(),
p2sh: any_addr_indexes.p2sh.create_reader(),
p2tr: any_addr_indexes.p2tr.create_reader(),
p2wpkh: any_addr_indexes.p2wpkh.create_reader(),
p2wsh: any_addr_indexes.p2wsh.create_reader(),
},
any_address_index_to_any_address_data: ByAnyAddress {
funded: addresses_data.funded.create_reader(),
empty: addresses_data.empty.create_reader(),
any_addr_index_to_any_addr_data: ByAnyAddr {
funded: addrs_data.funded.create_reader(),
empty: addrs_data.empty.create_reader(),
},
}
}
/// Get reader for specific address type.
pub(crate) fn address_reader(&self, address_type: OutputType) -> &Reader {
self.address_type_index_to_any_address_index
.get(address_type)
pub(crate) fn addr_reader(&self, addr_type: OutputType) -> &Reader {
self.addr_type_index_to_any_addr_index
.get(addr_type)
.unwrap()
}
}

View File

@@ -6,9 +6,9 @@ use tracing::{debug, warn};
use vecdb::Stamp;
use super::super::{
AddressesDataVecs,
address::AnyAddressIndexesVecs,
cohorts::{AddressCohorts, UTXOCohorts},
AddrsDataVecs,
addr::AnyAddrIndexesVecs,
cohorts::{AddrCohorts, UTXOCohorts},
};
/// Result of state recovery.
@@ -25,22 +25,22 @@ pub struct RecoveredState {
pub(crate) fn recover_state(
height: Height,
chain_state_rollback: vecdb::Result<Stamp>,
any_address_indexes: &mut AnyAddressIndexesVecs,
addresses_data: &mut AddressesDataVecs,
any_addr_indexes: &mut AnyAddrIndexesVecs,
addrs_data: &mut AddrsDataVecs,
utxo_cohorts: &mut UTXOCohorts,
address_cohorts: &mut AddressCohorts,
addr_cohorts: &mut AddrCohorts,
) -> Result<RecoveredState> {
let stamp = Stamp::from(height);
// Rollback address state vectors
let address_indexes_rollback = any_address_indexes.rollback_before(stamp);
let address_data_rollback = addresses_data.rollback_before(stamp);
let addr_indexes_rollback = any_addr_indexes.rollback_before(stamp);
let addr_data_rollback = addrs_data.rollback_before(stamp);
// Verify rollback consistency - all must agree on the same height
let consistent_height = rollback_states(
chain_state_rollback,
address_indexes_rollback,
address_data_rollback,
addr_indexes_rollback,
addr_data_rollback,
);
// If rollbacks are inconsistent, start fresh
@@ -88,19 +88,19 @@ pub(crate) fn recover_state(
// Import address cohort states - all must succeed
debug!(
"importing address cohort states at height {}",
"importing addr cohort states at height {}",
consistent_height
);
if !address_cohorts.import_separate_states(consistent_height) {
if !addr_cohorts.import_separate_states(consistent_height) {
warn!(
"Address cohort state import failed at height {}",
"Addr cohort state import failed at height {}",
consistent_height
);
return Ok(RecoveredState {
starting_height: Height::ZERO,
});
}
debug!("address cohort states imported");
debug!("addr cohort states imported");
Ok(RecoveredState {
starting_height: consistent_height,
@@ -111,22 +111,22 @@ pub(crate) fn recover_state(
///
/// Resets all state vectors and cohort states.
pub(crate) fn reset_state(
any_address_indexes: &mut AnyAddressIndexesVecs,
addresses_data: &mut AddressesDataVecs,
any_addr_indexes: &mut AnyAddrIndexesVecs,
addrs_data: &mut AddrsDataVecs,
utxo_cohorts: &mut UTXOCohorts,
address_cohorts: &mut AddressCohorts,
addr_cohorts: &mut AddrCohorts,
) -> Result<RecoveredState> {
// Reset address state
any_address_indexes.reset()?;
addresses_data.reset()?;
any_addr_indexes.reset()?;
addrs_data.reset()?;
// Reset cohort state heights
utxo_cohorts.reset_separate_state_heights();
address_cohorts.reset_separate_state_heights();
addr_cohorts.reset_separate_state_heights();
// Reset cost_basis_data for all cohorts
utxo_cohorts.reset_separate_cost_basis_data()?;
address_cohorts.reset_separate_cost_basis_data()?;
addr_cohorts.reset_separate_cost_basis_data()?;
Ok(RecoveredState {
starting_height: Height::ZERO,
@@ -164,8 +164,8 @@ pub enum StartMode {
/// otherwise returns Height::ZERO (need fresh start).
fn rollback_states(
chain_state_rollback: vecdb::Result<Stamp>,
address_indexes_rollbacks: Result<Vec<Stamp>>,
address_data_rollbacks: Result<[Stamp; 2]>,
addr_indexes_rollbacks: Result<Vec<Stamp>>,
addr_data_rollbacks: Result<[Stamp; 2]>,
) -> Height {
let mut heights: BTreeSet<Height> = BTreeSet::new();
@@ -181,30 +181,30 @@ fn rollback_states(
);
heights.insert(chain_height);
let Ok(stamps) = address_indexes_rollbacks else {
let Ok(stamps) = addr_indexes_rollbacks else {
warn!(
"address_indexes rollback failed: {:?}",
address_indexes_rollbacks
"addr_indexes rollback failed: {:?}",
addr_indexes_rollbacks
);
return Height::ZERO;
};
for (i, s) in stamps.iter().enumerate() {
let h = Height::from(*s).incremented();
debug!(
"address_indexes[{}] rolled back to stamp {:?}, height {}",
"addr_indexes[{}] rolled back to stamp {:?}, height {}",
i, s, h
);
heights.insert(h);
}
let Ok(stamps) = address_data_rollbacks else {
warn!("address_data rollback failed: {:?}", address_data_rollbacks);
let Ok(stamps) = addr_data_rollbacks else {
warn!("addr_data rollback failed: {:?}", addr_data_rollbacks);
return Height::ZERO;
};
for (i, s) in stamps.iter().enumerate() {
let h = Height::from(*s).incremented();
debug!(
"address_data[{}] rolled back to stamp {:?}, height {}",
"addr_data[{}] rolled back to stamp {:?}, height {}",
i, s, h
);
heights.insert(h);

View File

@@ -1,18 +1,18 @@
use std::time::Instant;
use brk_error::Result;
use brk_types::{EmptyAddressData, FundedAddressData, Height};
use brk_types::{EmptyAddrData, FundedAddrData, Height};
use rayon::prelude::*;
use tracing::info;
use vecdb::{AnyStoredVec, AnyVec, Stamp, VecIndex, WritableVec};
use crate::distribution::{
Vecs,
block::{WithAddressDataSource, process_empty_addresses, process_funded_addresses},
block::{WithAddrDataSource, process_empty_addrs, process_funded_addrs},
state::BlockState,
};
use super::super::address::{AddressTypeToTypeIndexMap, AddressesDataVecs, AnyAddressIndexesVecs};
use super::super::addr::{AddrTypeToTypeIndexMap, AddrsDataVecs, AnyAddrIndexesVecs};
/// Process address updates from caches.
///
@@ -22,20 +22,20 @@ use super::super::address::{AddressTypeToTypeIndexMap, AddressesDataVecs, AnyAdd
/// - Updates address indexes
///
/// Call this before `flush()` to prepare data for writing.
pub(crate) fn process_address_updates(
addresses_data: &mut AddressesDataVecs,
address_indexes: &mut AnyAddressIndexesVecs,
empty_updates: AddressTypeToTypeIndexMap<WithAddressDataSource<EmptyAddressData>>,
funded_updates: AddressTypeToTypeIndexMap<WithAddressDataSource<FundedAddressData>>,
pub(crate) fn process_addr_updates(
addrs_data: &mut AddrsDataVecs,
addr_indexes: &mut AnyAddrIndexesVecs,
empty_updates: AddrTypeToTypeIndexMap<WithAddrDataSource<EmptyAddrData>>,
funded_updates: AddrTypeToTypeIndexMap<WithAddrDataSource<FundedAddrData>>,
) -> Result<()> {
info!("Processing address updates...");
info!("Processing addr updates...");
let i = Instant::now();
let empty_result = process_empty_addresses(addresses_data, empty_updates)?;
let funded_result = process_funded_addresses(addresses_data, funded_updates)?;
address_indexes.par_batch_update(empty_result, funded_result)?;
let empty_result = process_empty_addrs(addrs_data, empty_updates)?;
let funded_result = process_funded_addrs(addrs_data, funded_updates)?;
addr_indexes.par_batch_update(empty_result, funded_result)?;
info!("Processed address updates in {:?}", i.elapsed());
info!("Processed addr updates in {:?}", i.elapsed());
Ok(())
}
@@ -73,12 +73,12 @@ pub(crate) fn write(
vecs.supply_state.push(block_state.supply);
}
vecs.any_address_indexes
vecs.any_addr_indexes
.par_iter_mut()
.chain(vecs.addresses_data.par_iter_mut())
.chain(vecs.addresses.funded.par_iter_height_mut())
.chain(vecs.addresses.empty.par_iter_height_mut())
.chain(vecs.addresses.activity.par_iter_height_mut())
.chain(vecs.addrs_data.par_iter_mut())
.chain(vecs.addrs.funded.par_iter_height_mut())
.chain(vecs.addrs.empty.par_iter_height_mut())
.chain(vecs.addrs.activity.par_iter_height_mut())
.chain(
[
&mut vecs.supply_state as &mut dyn AnyStoredVec,
@@ -87,13 +87,13 @@ pub(crate) fn write(
.into_par_iter(),
)
.chain(vecs.utxo_cohorts.par_iter_vecs_mut())
.chain(vecs.address_cohorts.par_iter_vecs_mut())
.chain(vecs.addr_cohorts.par_iter_vecs_mut())
.try_for_each(|v| v.any_stamped_write_maybe_with_changes(stamp, with_changes))?;
// Commit states after vec writes
let cleanup = with_changes;
vecs.utxo_cohorts.commit_all_states(height, cleanup)?;
vecs.address_cohorts.commit_all_states(height, cleanup)?;
vecs.addr_cohorts.commit_all_states(height, cleanup)?;
info!("Wrote in {:?}", i.elapsed());

View File

@@ -1,4 +1,4 @@
pub mod address;
pub mod addr;
mod block;
pub mod cohorts;
pub mod compute;
@@ -11,5 +11,5 @@ pub use vecs::Vecs;
pub const DB_NAME: &str = "distribution";
pub use address::{AddressTypeToTypeIndexMap, AddressesDataVecs, AnyAddressIndexesVecs};
pub use cohorts::{AddressCohorts, DynCohortVecs, UTXOCohorts};
pub use addr::{AddrTypeToTypeIndexMap, AddrsDataVecs, AnyAddrIndexesVecs};
pub use cohorts::{AddrCohorts, DynCohortVecs, UTXOCohorts};

View File

@@ -1,7 +1,7 @@
use std::path::Path;
use brk_error::Result;
use brk_types::{Age, Cents, FundedAddressData, Sats, SupplyState};
use brk_types::{Age, Cents, FundedAddrData, Sats, SupplyState};
use vecdb::unlikely;
use super::super::cost_basis::{CostBasisRaw, RealizedOps};
@@ -10,22 +10,22 @@ use super::base::CohortState;
/// Significant digits for address cost basis prices (after rounding to dollars).
const COST_BASIS_PRICE_DIGITS: i32 = 4;
pub struct AddressCohortState<R: RealizedOps> {
pub address_count: u64,
pub struct AddrCohortState<R: RealizedOps> {
pub addr_count: u64,
pub inner: CohortState<R, CostBasisRaw>,
}
impl<R: RealizedOps> AddressCohortState<R> {
impl<R: RealizedOps> AddrCohortState<R> {
pub(crate) fn new(path: &Path, name: &str) -> Self {
Self {
address_count: 0,
addr_count: 0,
inner: CohortState::new(path, name).with_price_rounding(COST_BASIS_PRICE_DIGITS),
}
}
/// Reset state for fresh start.
pub(crate) fn reset(&mut self) {
self.address_count = 0;
self.addr_count = 0;
self.inner.supply = SupplyState::default();
self.inner.sent = Sats::ZERO;
self.inner.satdays_destroyed = Sats::ZERO;
@@ -34,18 +34,18 @@ impl<R: RealizedOps> AddressCohortState<R> {
pub(crate) fn send(
&mut self,
address_data: &mut FundedAddressData,
addr_data: &mut FundedAddrData,
value: Sats,
current_price: Cents,
prev_price: Cents,
ath: Cents,
age: Age,
) -> Result<()> {
let prev = address_data.cost_basis_snapshot();
address_data.send(value, prev_price)?;
let current = address_data.cost_basis_snapshot();
let prev = addr_data.cost_basis_snapshot();
addr_data.send(value, prev_price)?;
let current = addr_data.cost_basis_snapshot();
self.inner.send_address(
self.inner.send_addr(
&SupplyState {
utxo_count: 1,
value,
@@ -63,16 +63,16 @@ impl<R: RealizedOps> AddressCohortState<R> {
pub(crate) fn receive_outputs(
&mut self,
address_data: &mut FundedAddressData,
addr_data: &mut FundedAddrData,
value: Sats,
price: Cents,
output_count: u32,
) {
let prev = address_data.cost_basis_snapshot();
address_data.receive_outputs(value, price, output_count);
let current = address_data.cost_basis_snapshot();
let prev = addr_data.cost_basis_snapshot();
addr_data.receive_outputs(value, price, output_count);
let current = addr_data.cost_basis_snapshot();
self.inner.receive_address(
self.inner.receive_addr(
&SupplyState {
utxo_count: output_count as u64,
value,
@@ -83,53 +83,53 @@ impl<R: RealizedOps> AddressCohortState<R> {
);
}
pub(crate) fn add(&mut self, address_data: &FundedAddressData) {
self.address_count += 1;
pub(crate) fn add(&mut self, addr_data: &FundedAddrData) {
self.addr_count += 1;
self.inner
.increment_snapshot(&address_data.cost_basis_snapshot());
.increment_snapshot(&addr_data.cost_basis_snapshot());
}
pub(crate) fn subtract(&mut self, address_data: &FundedAddressData) {
let snapshot = address_data.cost_basis_snapshot();
pub(crate) fn subtract(&mut self, addr_data: &FundedAddrData) {
let snapshot = addr_data.cost_basis_snapshot();
// Check for potential underflow before it happens
if unlikely(self.inner.supply.utxo_count < snapshot.supply_state.utxo_count) {
panic!(
"AddressCohortState::subtract underflow!\n\
Cohort state: address_count={}, supply={}\n\
Address being subtracted: {}\n\
Address supply: {}\n\
"AddrCohortState::subtract underflow!\n\
Cohort state: addr_count={}, supply={}\n\
Addr being subtracted: {}\n\
Addr supply: {}\n\
Realized price: {}\n\
This means the address is not properly tracked in this cohort.",
self.address_count,
This means the addr is not properly tracked in this cohort.",
self.addr_count,
self.inner.supply,
address_data,
addr_data,
snapshot.supply_state,
snapshot.realized_price
);
}
if unlikely(self.inner.supply.value < snapshot.supply_state.value) {
panic!(
"AddressCohortState::subtract value underflow!\n\
Cohort state: address_count={}, supply={}\n\
Address being subtracted: {}\n\
Address supply: {}\n\
"AddrCohortState::subtract value underflow!\n\
Cohort state: addr_count={}, supply={}\n\
Addr being subtracted: {}\n\
Addr supply: {}\n\
Realized price: {}\n\
This means the address is not properly tracked in this cohort.",
self.address_count,
This means the addr is not properly tracked in this cohort.",
self.addr_count,
self.inner.supply,
address_data,
addr_data,
snapshot.supply_state,
snapshot.realized_price
);
}
self.address_count = self.address_count.checked_sub(1).unwrap_or_else(|| {
self.addr_count = self.addr_count.checked_sub(1).unwrap_or_else(|| {
panic!(
"AddressCohortState::subtract address_count underflow! address_count=0\n\
Address being subtracted: {}\n\
"AddrCohortState::subtract addr_count underflow! addr_count=0\n\
Addr being subtracted: {}\n\
Realized price: {}",
address_data, snapshot.realized_price
addr_data, snapshot.realized_price
)
});

View File

@@ -158,7 +158,7 @@ impl<R: RealizedOps, C: CostBasisOps> CohortState<R, C> {
}
}
pub(crate) fn receive_address(
pub(crate) fn receive_addr(
&mut self,
supply: &SupplyState,
price: Cents,
@@ -224,7 +224,7 @@ impl<R: RealizedOps, C: CostBasisOps> CohortState<R, C> {
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn send_address(
pub(crate) fn send_addr(
&mut self,
supply: &SupplyState,
current_price: Cents,

View File

@@ -1,7 +1,7 @@
mod address;
mod addr;
mod base;
mod utxo;
pub use address::*;
pub use addr::*;
pub use base::*;
pub use utxo::*;

View File

@@ -4,7 +4,7 @@ use brk_error::Result;
use brk_indexer::Indexer;
use brk_traversable::Traversable;
use brk_types::{
Cents, EmptyAddressData, EmptyAddressIndex, FundedAddressData, FundedAddressIndex, Height,
Cents, EmptyAddrData, EmptyAddrIndex, FundedAddrData, FundedAddrIndex, Height,
Indexes, StoredF64, SupplyState, Timestamp, TxIndex, Version,
};
use tracing::{debug, info};
@@ -28,28 +28,28 @@ use crate::{
};
use super::{
AddressCohorts, AddressesDataVecs, AnyAddressIndexesVecs, RangeMap, UTXOCohorts,
address::{
AddressCountsVecs, AddressActivityVecs, DeltaVecs, NewAddressCountVecs, TotalAddressCountVecs,
AddrCohorts, AddrsDataVecs, AnyAddrIndexesVecs, RangeMap, UTXOCohorts,
addr::{
AddrCountsVecs, AddrActivityVecs, DeltaVecs, NewAddrCountVecs, TotalAddrCountVecs,
},
};
const VERSION: Version = Version::new(22);
#[derive(Traversable)]
pub struct AddressMetricsVecs<M: StorageMode = Rw> {
pub funded: AddressCountsVecs<M>,
pub empty: AddressCountsVecs<M>,
pub activity: AddressActivityVecs<M>,
pub total: TotalAddressCountVecs<M>,
pub new: NewAddressCountVecs<M>,
pub struct AddrMetricsVecs<M: StorageMode = Rw> {
pub funded: AddrCountsVecs<M>,
pub empty: AddrCountsVecs<M>,
pub activity: AddrActivityVecs<M>,
pub total: TotalAddrCountVecs<M>,
pub new: NewAddrCountVecs<M>,
pub delta: DeltaVecs,
#[traversable(wrap = "indexes", rename = "funded")]
pub funded_index:
LazyVecFrom1<FundedAddressIndex, FundedAddressIndex, FundedAddressIndex, FundedAddressData>,
LazyVecFrom1<FundedAddrIndex, FundedAddrIndex, FundedAddrIndex, FundedAddrData>,
#[traversable(wrap = "indexes", rename = "empty")]
pub empty_index:
LazyVecFrom1<EmptyAddressIndex, EmptyAddressIndex, EmptyAddressIndex, EmptyAddressData>,
LazyVecFrom1<EmptyAddrIndex, EmptyAddrIndex, EmptyAddrIndex, EmptyAddrData>,
}
#[derive(Traversable)]
@@ -61,17 +61,17 @@ pub struct Vecs<M: StorageMode = Rw> {
#[traversable(wrap = "supply", rename = "state")]
pub supply_state: M::Stored<BytesVec<Height, SupplyState>>,
#[traversable(wrap = "addresses", rename = "indexes")]
pub any_address_indexes: AnyAddressIndexesVecs<M>,
#[traversable(wrap = "addresses", rename = "data")]
pub addresses_data: AddressesDataVecs<M>,
#[traversable(wrap = "addrs", rename = "indexes")]
pub any_addr_indexes: AnyAddrIndexesVecs<M>,
#[traversable(wrap = "addrs", rename = "data")]
pub addrs_data: AddrsDataVecs<M>,
#[traversable(wrap = "cohorts", rename = "utxo")]
pub utxo_cohorts: UTXOCohorts<M>,
#[traversable(wrap = "cohorts", rename = "address")]
pub address_cohorts: AddressCohorts<M>,
#[traversable(wrap = "cohorts", rename = "addr")]
pub addr_cohorts: AddrCohorts<M>,
#[traversable(wrap = "cointime/activity")]
pub coinblocks_destroyed: PerBlockCumulativeWithSums<StoredF64, StoredF64, M>,
pub addresses: AddressMetricsVecs<M>,
pub addrs: AddrMetricsVecs<M>,
/// In-memory block state for UTXO processing. Persisted via supply_state.
/// Kept across compute() calls to avoid O(n) rebuild on resume.
@@ -111,47 +111,47 @@ impl Vecs {
let utxo_cohorts = UTXOCohorts::forced_import(&db, version, indexes, &states_path, cached_starts)?;
let address_cohorts = AddressCohorts::forced_import(&db, version, indexes, &states_path, cached_starts)?;
let addr_cohorts = AddrCohorts::forced_import(&db, version, indexes, &states_path, cached_starts)?;
// Create address data BytesVecs first so we can also use them for identity mappings
let funded_address_index_to_funded_address_data = BytesVec::forced_import_with(
vecdb::ImportOptions::new(&db, "funded_address_data", version)
let funded_addr_index_to_funded_addr_data = BytesVec::forced_import_with(
vecdb::ImportOptions::new(&db, "funded_addr_data", version)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?;
let empty_address_index_to_empty_address_data = BytesVec::forced_import_with(
vecdb::ImportOptions::new(&db, "empty_address_data", version)
let empty_addr_index_to_empty_addr_data = BytesVec::forced_import_with(
vecdb::ImportOptions::new(&db, "empty_addr_data", version)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?;
// Identity mappings for traversable
let funded_address_index = LazyVecFrom1::init(
"funded_address_index",
let funded_addr_index = LazyVecFrom1::init(
"funded_addr_index",
version,
funded_address_index_to_funded_address_data.read_only_boxed_clone(),
funded_addr_index_to_funded_addr_data.read_only_boxed_clone(),
|index, _| index,
);
let empty_address_index = LazyVecFrom1::init(
"empty_address_index",
let empty_addr_index = LazyVecFrom1::init(
"empty_addr_index",
version,
empty_address_index_to_empty_address_data.read_only_boxed_clone(),
empty_addr_index_to_empty_addr_data.read_only_boxed_clone(),
|index, _| index,
);
let address_count = AddressCountsVecs::forced_import(&db, "address_count", version, indexes)?;
let empty_address_count =
AddressCountsVecs::forced_import(&db, "empty_address_count", version, indexes)?;
let address_activity =
AddressActivityVecs::forced_import(&db, "address_activity", version, indexes, cached_starts)?;
let addr_count = AddrCountsVecs::forced_import(&db, "addr_count", version, indexes)?;
let empty_addr_count =
AddrCountsVecs::forced_import(&db, "empty_addr_count", version, indexes)?;
let addr_activity =
AddrActivityVecs::forced_import(&db, "addr_activity", version, indexes, cached_starts)?;
// Stored total = address_count + empty_address_count (global + per-type, with all derived indexes)
let total_address_count = TotalAddressCountVecs::forced_import(&db, version, indexes)?;
// Stored total = addr_count + empty_addr_count (global + per-type, with all derived indexes)
let total_addr_count = TotalAddrCountVecs::forced_import(&db, version, indexes)?;
// Per-block delta of total (global + per-type)
let new_address_count =
NewAddressCountVecs::forced_import(&db, version, indexes, cached_starts)?;
let new_addr_count =
NewAddrCountVecs::forced_import(&db, version, indexes, cached_starts)?;
// Growth rate: delta change + rate (global + per-type)
let delta = DeltaVecs::new(version, &address_count, cached_starts, indexes);
let delta = DeltaVecs::new(version, &addr_count, cached_starts, indexes);
let this = Self {
supply_state: BytesVec::forced_import_with(
@@ -159,19 +159,19 @@ impl Vecs {
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,
addresses: AddressMetricsVecs {
funded: address_count,
empty: empty_address_count,
activity: address_activity,
total: total_address_count,
new: new_address_count,
addrs: AddrMetricsVecs {
funded: addr_count,
empty: empty_addr_count,
activity: addr_activity,
total: total_addr_count,
new: new_addr_count,
delta,
funded_index: funded_address_index,
empty_index: empty_address_index,
funded_index: funded_addr_index,
empty_index: empty_addr_index,
},
utxo_cohorts,
address_cohorts,
addr_cohorts,
coinblocks_destroyed: PerBlockCumulativeWithSums::forced_import(
&db,
@@ -181,10 +181,10 @@ impl Vecs {
cached_starts,
)?,
any_address_indexes: AnyAddressIndexesVecs::forced_import(&db, version)?,
addresses_data: AddressesDataVecs {
funded: funded_address_index_to_funded_address_data,
empty: empty_address_index_to_empty_address_data,
any_addr_indexes: AnyAddrIndexesVecs::forced_import(&db, version)?,
addrs_data: AddrsDataVecs {
funded: funded_addr_index_to_funded_addr_data,
empty: empty_addr_index_to_empty_addr_data,
},
chain_state: Vec::new(),
tx_index_to_height: RangeMap::default(),
@@ -275,10 +275,10 @@ impl Vecs {
let recovered = recover_state(
height,
chain_state_rollback,
&mut self.any_address_indexes,
&mut self.addresses_data,
&mut self.any_addr_indexes,
&mut self.addrs_data,
&mut self.utxo_cohorts,
&mut self.address_cohorts,
&mut self.addr_cohorts,
)?;
if recovered.starting_height.is_zero() {
@@ -302,14 +302,14 @@ impl Vecs {
// Recover or reuse chain_state
let starting_height = if recovered_height.is_zero() {
self.supply_state.reset()?;
self.addresses.funded.reset_height()?;
self.addresses.empty.reset_height()?;
self.addresses.activity.reset_height()?;
self.addrs.funded.reset_height()?;
self.addrs.empty.reset_height()?;
self.addrs.activity.reset_height()?;
reset_state(
&mut self.any_address_indexes,
&mut self.addresses_data,
&mut self.any_addr_indexes,
&mut self.addrs_data,
&mut self.utxo_cohorts,
&mut self.address_cohorts,
&mut self.addr_cohorts,
)?;
chain_state.clear();
@@ -356,7 +356,7 @@ impl Vecs {
debug!("validating computed versions");
let base_version = VERSION;
self.utxo_cohorts.validate_computed_versions(base_version)?;
self.address_cohorts
self.addr_cohorts
.validate_computed_versions(base_version)?;
debug!("computed versions validated");
@@ -406,7 +406,7 @@ impl Vecs {
{
let (r1, r2) = rayon::join(
|| self.utxo_cohorts.compute_overlapping_vecs(starting_indexes, exit),
|| self.address_cohorts.compute_overlapping_vecs(starting_indexes, exit),
|| self.addr_cohorts.compute_overlapping_vecs(starting_indexes, exit),
);
r1?;
r2?;
@@ -421,30 +421,30 @@ impl Vecs {
{
let (r1, r2) = rayon::join(
|| self.utxo_cohorts.compute_rest_part1(prices, starting_indexes, exit),
|| self.address_cohorts.compute_rest_part1(prices, starting_indexes, exit),
|| self.addr_cohorts.compute_rest_part1(prices, starting_indexes, exit),
);
r1?;
r2?;
}
// 6b. Compute address count sum (by address_type all)
self.addresses.funded.compute_rest(starting_indexes, exit)?;
self.addresses.empty.compute_rest(starting_indexes, exit)?;
// 6b. Compute address count sum (by addr_type -> all)
self.addrs.funded.compute_rest(starting_indexes, exit)?;
self.addrs.empty.compute_rest(starting_indexes, exit)?;
// 6c. Compute total_address_count = address_count + empty_address_count
self.addresses.total.compute(
// 6c. Compute total_addr_count = addr_count + empty_addr_count
self.addrs.total.compute(
starting_indexes.height,
&self.addresses.funded,
&self.addresses.empty,
&self.addrs.funded,
&self.addrs.empty,
exit,
)?;
self.addresses
self.addrs
.activity
.compute_rest(starting_indexes.height, exit)?;
self.addresses.new.compute(
self.addrs.new.compute(
starting_indexes.height,
&self.addresses.total,
&self.addrs.total,
exit,
)?;
@@ -467,7 +467,7 @@ impl Vecs {
&height_to_market_cap,
exit,
)?;
self.address_cohorts
self.addr_cohorts
.compute_rest_part2(prices, starting_indexes, exit)?;
let _lock = exit.lock();
@@ -483,13 +483,13 @@ impl Vecs {
fn min_stateful_len(&self) -> Height {
self.utxo_cohorts
.min_stateful_len()
.min(self.address_cohorts.min_stateful_len())
.min(self.addr_cohorts.min_stateful_len())
.min(Height::from(self.supply_state.len()))
.min(self.any_address_indexes.min_stamped_len())
.min(self.addresses_data.min_stamped_len())
.min(Height::from(self.addresses.funded.min_stateful_len()))
.min(Height::from(self.addresses.empty.min_stateful_len()))
.min(Height::from(self.addresses.activity.min_stateful_len()))
.min(self.any_addr_indexes.min_stamped_len())
.min(self.addrs_data.min_stamped_len())
.min(Height::from(self.addrs.funded.min_stateful_len()))
.min(Height::from(self.addrs.empty.min_stateful_len()))
.min(Height::from(self.addrs.activity.min_stateful_len()))
.min(Height::from(self.coinblocks_destroyed.base.height.len()))
}
}