global: snapshot

This commit is contained in:
nym21
2026-04-14 22:53:10 +02:00
parent 904ec93668
commit 39da441d14
57 changed files with 2886 additions and 1668 deletions

View File

@@ -8,5 +8,5 @@ use crate::internal::PerBlockRollingAverage;
#[derive(Deref, DerefMut, Traversable)]
pub struct Vecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub PerBlockRollingAverage<Timestamp, M>,
#[traversable(flatten)] pub PerBlockRollingAverage<Timestamp, Timestamp, M>,
);

View File

@@ -7,12 +7,13 @@
//! | `receiving` | Unique addresses that received this block |
//! | `sending` | Unique addresses that sent this block |
//! | `reactivated` | Addresses that were empty and now have funds |
//! | `both` | Addresses that both sent AND received same block |
//! | `bidirectional` | Addresses that both sent AND received in same block |
//! | `active` | Distinct addresses involved (sent and/or received) |
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU32, Version};
use brk_types::{Height, StoredU32, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, Rw, StorageMode, WritableVec};
@@ -28,7 +29,7 @@ pub struct BlockActivityCounts {
pub reactivated: u32,
pub sending: u32,
pub receiving: u32,
pub both: u32,
pub bidirectional: u32,
}
impl BlockActivityCounts {
@@ -56,7 +57,7 @@ impl AddrTypeToActivityCounts {
total.reactivated += counts.reactivated;
total.sending += counts.sending;
total.receiving += counts.receiving;
total.both += counts.both;
total.bidirectional += counts.bidirectional;
}
total
}
@@ -65,16 +66,25 @@ impl AddrTypeToActivityCounts {
/// Activity count vectors for a single category (e.g., one address type or "all").
#[derive(Traversable)]
pub struct ActivityCountVecs<M: StorageMode = Rw> {
pub reactivated: PerBlockRollingAverage<StoredU32, M>,
pub sending: PerBlockRollingAverage<StoredU32, M>,
pub receiving: PerBlockRollingAverage<StoredU32, M>,
pub both: PerBlockRollingAverage<StoredU32, M>,
pub reactivated: PerBlockRollingAverage<StoredU32, StoredU64, M>,
pub sending: PerBlockRollingAverage<StoredU32, StoredU64, M>,
pub receiving: PerBlockRollingAverage<StoredU32, StoredU64, M>,
pub bidirectional: PerBlockRollingAverage<StoredU32, StoredU64, M>,
/// Distinct addresses involved in this block (sent and/or received),
/// computed at push time as `sending + receiving - bidirectional`
/// via inclusion-exclusion. For per-type instances this is
/// per-type. For the `all` aggregate it's the cross-type total.
pub active: PerBlockRollingAverage<StoredU32, StoredU64, M>,
}
impl ActivityCountVecs {
/// `prefix` is prepended to each field's disk name. Use `""` for the
/// "all" aggregate and `"{type}_"` for per-address-type instances.
/// Field names are suffixed with `_addrs` so the final disk series
/// are e.g. `active_addrs`, `p2tr_bidirectional_addrs`.
pub(crate) fn forced_import(
db: &Database,
name: &str,
prefix: &str,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
@@ -82,28 +92,35 @@ impl ActivityCountVecs {
Ok(Self {
reactivated: PerBlockRollingAverage::forced_import(
db,
&format!("{name}_reactivated"),
&format!("{prefix}reactivated_addrs"),
version,
indexes,
cached_starts,
)?,
sending: PerBlockRollingAverage::forced_import(
db,
&format!("{name}_sending"),
&format!("{prefix}sending_addrs"),
version,
indexes,
cached_starts,
)?,
receiving: PerBlockRollingAverage::forced_import(
db,
&format!("{name}_receiving"),
&format!("{prefix}receiving_addrs"),
version,
indexes,
cached_starts,
)?,
both: PerBlockRollingAverage::forced_import(
bidirectional: PerBlockRollingAverage::forced_import(
db,
&format!("{name}_both"),
&format!("{prefix}bidirectional_addrs"),
version,
indexes,
cached_starts,
)?,
active: PerBlockRollingAverage::forced_import(
db,
&format!("{prefix}active_addrs"),
version,
indexes,
cached_starts,
@@ -117,7 +134,8 @@ impl ActivityCountVecs {
.len()
.min(self.sending.block.len())
.min(self.receiving.block.len())
.min(self.both.block.len())
.min(self.bidirectional.block.len())
.min(self.active.block.len())
}
pub(crate) fn par_iter_height_mut(
@@ -125,9 +143,10 @@ impl ActivityCountVecs {
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
[
&mut self.reactivated.block as &mut dyn AnyStoredVec,
&mut self.sending.block as &mut dyn AnyStoredVec,
&mut self.receiving.block as &mut dyn AnyStoredVec,
&mut self.both.block as &mut dyn AnyStoredVec,
&mut self.sending.block,
&mut self.receiving.block,
&mut self.bidirectional.block,
&mut self.active.block,
]
.into_par_iter()
}
@@ -136,7 +155,8 @@ impl ActivityCountVecs {
self.reactivated.block.reset()?;
self.sending.block.reset()?;
self.receiving.block.reset()?;
self.both.block.reset()?;
self.bidirectional.block.reset()?;
self.active.block.reset()?;
Ok(())
}
@@ -145,14 +165,19 @@ impl ActivityCountVecs {
self.reactivated.block.push(counts.reactivated.into());
self.sending.block.push(counts.sending.into());
self.receiving.block.push(counts.receiving.into());
self.both.block.push(counts.both.into());
self.bidirectional
.block
.push(counts.bidirectional.into());
let active = counts.sending + counts.receiving - counts.bidirectional;
self.active.block.push(active.into());
}
pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
self.reactivated.compute_rest(max_from, exit)?;
self.sending.compute_rest(max_from, exit)?;
self.receiving.compute_rest(max_from, exit)?;
self.both.compute_rest(max_from, exit)?;
self.bidirectional.compute_rest(max_from, exit)?;
self.active.compute_rest(max_from, exit)?;
Ok(())
}
}
@@ -171,7 +196,6 @@ impl From<ByAddrType<ActivityCountVecs>> for AddrTypeToActivityCountVecs {
impl AddrTypeToActivityCountVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
@@ -180,7 +204,7 @@ impl AddrTypeToActivityCountVecs {
|type_name| {
ActivityCountVecs::forced_import(
db,
&format!("{type_name}_{name}"),
&format!("{type_name}_"),
version,
indexes,
cached_starts,
@@ -205,7 +229,8 @@ impl AddrTypeToActivityCountVecs {
vecs.push(&mut type_vecs.reactivated.block);
vecs.push(&mut type_vecs.sending.block);
vecs.push(&mut type_vecs.receiving.block);
vecs.push(&mut type_vecs.both.block);
vecs.push(&mut type_vecs.bidirectional.block);
vecs.push(&mut type_vecs.active.block);
}
vecs.into_par_iter()
}
@@ -243,16 +268,14 @@ pub struct AddrActivityVecs<M: StorageMode = Rw> {
impl AddrActivityVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
all: ActivityCountVecs::forced_import(db, name, version, indexes, cached_starts)?,
all: ActivityCountVecs::forced_import(db, "", version, indexes, cached_starts)?,
by_addr_type: AddrTypeToActivityCountVecs::forced_import(
db,
name,
version,
indexes,
cached_starts,

View File

@@ -19,7 +19,7 @@ pub use exposed::{
pub use indexes::AnyAddrIndexesVecs;
pub use new_addr_count::NewAddrCountVecs;
pub use reused::{
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount, ReusedAddrVecs,
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrEventCount, ReusedAddrVecs,
};
pub use total_addr_count::TotalAddrCountVecs;
pub use type_map::{AddrTypeToTypeIndexMap, AddrTypeToVec, HeightToAddrTypeToVec};

View File

@@ -0,0 +1,11 @@
//! Per-block reused-address event tracking. Holds both the output-side
//! ("an output landed on a previously-used address") and input-side
//! ("an input spent from an address in the reused set") event counters.
//! See [`vecs::ReusedAddrEventsVecs`] for the full description of each
//! metric.
mod state;
mod vecs;
pub use state::AddrTypeToReusedAddrEventCount;
pub use vecs::ReusedAddrEventsVecs;

View File

@@ -0,0 +1,28 @@
use brk_cohort::ByAddrType;
use derive_more::{Deref, DerefMut};
/// Per-block running counter of reused-address events, per address type.
/// Shared runtime container for both output-side events
/// (`output_to_reused_addr_count`, outputs landing on addresses that
/// had already received ≥ 1 prior output) and input-side events
/// (`input_from_reused_addr_count`, inputs spending from addresses
/// with lifetime `funded_txo_count > 1`). Reset at the start of each
/// block (no disk recovery needed since per-block flow is
/// reconstructed deterministically from `process_received` /
/// `process_sent`).
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToReusedAddrEventCount(ByAddrType<u64>);
impl AddrTypeToReusedAddrEventCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
#[inline]
pub(crate) fn reset(&mut self) {
for v in self.0.values_mut() {
*v = 0;
}
}
}

View File

@@ -0,0 +1,261 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Indexes, OutputType, StoredF32, StoredU32, StoredU64, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, Rw, StorageMode, WritableVec};
use crate::{
indexes, inputs,
internal::{
PerBlockCumulativeRolling, PerBlockRollingAverage, PercentCumulativeRolling,
WindowStartVec, Windows, WithAddrTypes,
},
outputs,
};
use super::state::AddrTypeToReusedAddrEventCount;
/// Per-block reused-address event metrics. Holds three families of
/// signals: output-level (use), input-level (spend), and address-level
/// (active in block).
///
/// `output_to_reused_addr_count`: every output landing on an address that had
/// already received at least one prior output anywhere in its lifetime,
/// i.e. an output-level reuse event. Outputs are not deduplicated per
/// address within a block: an address receiving N outputs in one block
/// that had `before` lifetime outputs contributes
/// `max(0, N - max(0, 1 - before))` events. Only the very first output
/// an address ever sees is excluded. Every subsequent output counts,
/// matching the standard "% of outputs to previously-used addresses"
/// reuse ratio reported by external sources. `output_to_reused_addr_share`
/// uses `outputs::ByTypeVecs::output_count` (all 12 output types) as
/// denominator. `spendable_output_to_reused_addr_share` uses the
/// op_return-excluded 11-type aggregate (`spendable_output_count`).
///
/// `input_from_reused_addr_count`: every input spending from an address
/// whose lifetime `funded_txo_count > 1` at the time of the spend (i.e.
/// the address is in the same reused set tracked by
/// `reused_addr_count`). Every input is checked independently. If a
/// single address has multiple inputs in one block each one counts.
/// This is a *stable-predicate* signal about the sending address, not
/// an output-level repeat event: the first spend from a reused address
/// counts just as much as the tenth. Denominator
/// (`input_from_reused_addr_share`): `inputs::ByTypeVecs::input_count` (11
/// spendable types, where `p2ms`, `unknown`, `empty` count as true
/// negatives).
///
/// `active_reused_addr_count` / `active_reused_addr_share`: block-level
/// *address* signals (single aggregate, not per-type).
/// `active_reused_addr_count` is the count of distinct addresses
/// involved in this block (sent and/or received) that satisfy `is_reused()`
/// after the block's events, populated inline in `process_received`
/// (each receiver, post-receive) and in `process_sent` (each
/// first-encounter sender, deduped against `received_addrs` so
/// addresses that did both aren't double-counted).
/// `active_reused_addr_share` is the per-block ratio
/// `reused / active * 100` as a percentage in `[0, 100]` (or `0.0` for
/// empty blocks). The denominator (distinct active addrs per block)
/// lives on `ActivityCountVecs::active` (`addrs.activity.all.active`),
/// derived from `sending + receiving - bidirectional`. Both fields
/// use `PerBlockRollingAverage` so their lazy 24h/1w/1m/1y series are
/// rolling *averages* of the per-block values. Sums and cumulatives of
/// distinct-address counts would be misleading because the same
/// address can appear in multiple blocks.
#[derive(Traversable)]
pub struct ReusedAddrEventsVecs<M: StorageMode = Rw> {
pub output_to_reused_addr_count:
WithAddrTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub output_to_reused_addr_share: WithAddrTypes<PercentCumulativeRolling<BasisPoints16, M>>,
pub spendable_output_to_reused_addr_share: PercentCumulativeRolling<BasisPoints16, M>,
pub input_from_reused_addr_count:
WithAddrTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub input_from_reused_addr_share: WithAddrTypes<PercentCumulativeRolling<BasisPoints16, M>>,
pub active_reused_addr_count: PerBlockRollingAverage<StoredU32, StoredU64, M>,
pub active_reused_addr_share: PerBlockRollingAverage<StoredF32, StoredF32, M>,
}
impl ReusedAddrEventsVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let import_count = |name: &str| {
WithAddrTypes::<PerBlockCumulativeRolling<StoredU64, StoredU64>>::forced_import(
db,
name,
version,
indexes,
cached_starts,
)
};
let import_percent = |name: &str| -> Result<WithAddrTypes<
PercentCumulativeRolling<BasisPoints16>,
>> {
Ok(WithAddrTypes {
all: PercentCumulativeRolling::forced_import(db, name, version, indexes)?,
by_addr_type: ByAddrType::new_with_name(|type_name| {
PercentCumulativeRolling::forced_import(
db,
&format!("{type_name}_{name}"),
version,
indexes,
)
})?,
})
};
let output_to_reused_addr_count = import_count("output_to_reused_addr_count")?;
let output_to_reused_addr_share = import_percent("output_to_reused_addr_share")?;
let spendable_output_to_reused_addr_share = PercentCumulativeRolling::forced_import(
db,
"spendable_output_to_reused_addr_share",
version,
indexes,
)?;
let input_from_reused_addr_count = import_count("input_from_reused_addr_count")?;
let input_from_reused_addr_share = import_percent("input_from_reused_addr_share")?;
let active_reused_addr_count = PerBlockRollingAverage::forced_import(
db,
"active_reused_addr_count",
version,
indexes,
cached_starts,
)?;
let active_reused_addr_share = PerBlockRollingAverage::forced_import(
db,
"active_reused_addr_share",
version,
indexes,
cached_starts,
)?;
Ok(Self {
output_to_reused_addr_count,
output_to_reused_addr_share,
spendable_output_to_reused_addr_share,
input_from_reused_addr_count,
input_from_reused_addr_share,
active_reused_addr_count,
active_reused_addr_share,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.output_to_reused_addr_count
.min_stateful_len()
.min(self.input_from_reused_addr_count.min_stateful_len())
.min(self.active_reused_addr_count.block.len())
.min(self.active_reused_addr_share.block.len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.output_to_reused_addr_count
.par_iter_height_mut()
.chain(self.input_from_reused_addr_count.par_iter_height_mut())
.chain([
&mut self.active_reused_addr_count.block as &mut dyn AnyStoredVec,
&mut self.active_reused_addr_share.block as &mut dyn AnyStoredVec,
])
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.output_to_reused_addr_count.reset_height()?;
self.input_from_reused_addr_count.reset_height()?;
self.active_reused_addr_count.block.reset()?;
self.active_reused_addr_share.block.reset()?;
Ok(())
}
#[inline(always)]
pub(crate) fn push_height(
&mut self,
uses: &AddrTypeToReusedAddrEventCount,
spends: &AddrTypeToReusedAddrEventCount,
active_addr_count: u32,
active_reused_addr_count: u32,
) {
self.output_to_reused_addr_count
.push_height(uses.sum(), uses.values().copied());
self.input_from_reused_addr_count
.push_height(spends.sum(), spends.values().copied());
self.active_reused_addr_count
.block
.push(StoredU32::from(active_reused_addr_count));
// Stored as a percentage in [0, 100] to match the rest of the
// codebase (Unit.percentage on the website expects 0..100). The
// `active_addr_count` denominator lives on `ActivityCountVecs`
// (`addrs.activity.all.active`), passed in here so we can
// compute the per-block ratio inline.
let share = if active_addr_count > 0 {
100.0 * (active_reused_addr_count as f32 / active_addr_count as f32)
} else {
0.0
};
self.active_reused_addr_share
.block
.push(StoredF32::from(share));
}
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
outputs_by_type: &outputs::ByTypeVecs,
inputs_by_type: &inputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.output_to_reused_addr_count
.compute_rest(starting_indexes.height, exit)?;
self.input_from_reused_addr_count
.compute_rest(starting_indexes.height, exit)?;
self.active_reused_addr_count
.compute_rest(starting_indexes.height, exit)?;
self.active_reused_addr_share
.compute_rest(starting_indexes.height, exit)?;
self.output_to_reused_addr_share.all.compute_count_ratio(
&self.output_to_reused_addr_count.all,
&outputs_by_type.output_count.all,
starting_indexes.height,
exit,
)?;
self.spendable_output_to_reused_addr_share.compute_count_ratio(
&self.output_to_reused_addr_count.all,
&outputs_by_type.spendable_output_count,
starting_indexes.height,
exit,
)?;
self.input_from_reused_addr_share.all.compute_count_ratio(
&self.input_from_reused_addr_count.all,
&inputs_by_type.input_count.all,
starting_indexes.height,
exit,
)?;
for otype in OutputType::ADDR_TYPES {
self.output_to_reused_addr_share
.by_addr_type
.get_mut_unwrap(otype)
.compute_count_ratio(
self.output_to_reused_addr_count.by_addr_type.get_unwrap(otype),
outputs_by_type.output_count.by_type.get(otype),
starting_indexes.height,
exit,
)?;
self.input_from_reused_addr_share
.by_addr_type
.get_mut_unwrap(otype)
.compute_count_ratio(
self.input_from_reused_addr_count.by_addr_type.get_unwrap(otype),
inputs_by_type.input_count.by_type.get(otype),
starting_indexes.height,
exit,
)?;
}
Ok(())
}
}

View File

@@ -1,22 +1,26 @@
//! Reused address tracking.
//!
//! An address is "reused" if its lifetime `funded_txo_count > 1` i.e. it
//! has received more than one output across its lifetime. This is the
//! simplest output-multiplicity proxy for address linkability.
//! An address is "reused" if its lifetime `funded_txo_count > 1`, i.e.
//! it has received more than one output across its lifetime. This is
//! the simplest output-multiplicity proxy for address linkability.
//!
//! Two facets are tracked here:
//! - [`count`] how many distinct addresses are currently reused (funded)
//! and how many have *ever* been reused (total). Per address type plus
//! an aggregated `all`.
//! - [`uses`] per-block count of outputs going to addresses that were
//! already reused, plus the derived percent over total address-output
//! count (denominator from `outputs::by_type`).
//! - [`count`]: how many distinct addresses are currently reused
//! (funded) and how many have *ever* been reused (total). Per address
//! type plus an aggregated `all`.
//! - [`events`]: per-block address-reuse event counts on both sides.
//! Output-side (`output_to_reused_addr_count`, outputs landing on
//! addresses that had already received ≥ 1 prior output) and
//! input-side (`input_from_reused_addr_count`, inputs spending from
//! addresses with lifetime `funded_txo_count > 1`). Each count is
//! paired with a percent over the matching block-level output/input
//! total.
mod count;
mod uses;
mod events;
pub use count::{AddrTypeToReusedAddrCount, ReusedAddrCountsVecs};
pub use uses::{AddrTypeToReusedAddrUseCount, ReusedAddrUsesVecs};
pub use events::{AddrTypeToReusedAddrEventCount, ReusedAddrEventsVecs};
use brk_error::Result;
use brk_traversable::Traversable;
@@ -25,17 +29,17 @@ use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
indexes,
indexes, inputs,
internal::{WindowStartVec, Windows},
outputs,
};
/// Top-level container for all reused address tracking: counts (funded +
/// total) plus per-block uses (count + percent).
/// total) plus per-block reuse events (output-side + input-side).
#[derive(Traversable)]
pub struct ReusedAddrVecs<M: StorageMode = Rw> {
pub count: ReusedAddrCountsVecs<M>,
pub uses: ReusedAddrUsesVecs<M>,
pub events: ReusedAddrEventsVecs<M>,
}
impl ReusedAddrVecs {
@@ -47,14 +51,14 @@ impl ReusedAddrVecs {
) -> Result<Self> {
Ok(Self {
count: ReusedAddrCountsVecs::forced_import(db, version, indexes)?,
uses: ReusedAddrUsesVecs::forced_import(db, version, indexes, cached_starts)?,
events: ReusedAddrEventsVecs::forced_import(db, version, indexes, cached_starts)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.count
.min_stateful_len()
.min(self.uses.min_stateful_len())
.min(self.events.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
@@ -62,12 +66,12 @@ impl ReusedAddrVecs {
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.count
.par_iter_height_mut()
.chain(self.uses.par_iter_height_mut())
.chain(self.events.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.count.reset_height()?;
self.uses.reset_height()?;
self.events.reset_height()?;
Ok(())
}
@@ -75,11 +79,16 @@ impl ReusedAddrVecs {
&mut self,
starting_indexes: &Indexes,
outputs_by_type: &outputs::ByTypeVecs,
inputs_by_type: &inputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.uses
.compute_rest(starting_indexes, outputs_by_type, exit)?;
self.events.compute_rest(
starting_indexes,
outputs_by_type,
inputs_by_type,
exit,
)?;
Ok(())
}
}

View File

@@ -1,8 +0,0 @@
//! Per-block reused-address-use tracking. See [`vecs::ReusedAddrUsesVecs`]
//! for the full description of the metric.
mod state;
mod vecs;
pub use state::AddrTypeToReusedAddrUseCount;
pub use vecs::ReusedAddrUsesVecs;

View File

@@ -1,22 +0,0 @@
use brk_cohort::ByAddrType;
use derive_more::{Deref, DerefMut};
/// Per-block running counter of reused address uses, per address type.
/// Reset at the start of each block (no disk recovery needed since the
/// per-block flow is reconstructed from `process_received` deterministically).
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToReusedAddrUseCount(ByAddrType<u64>);
impl AddrTypeToReusedAddrUseCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
#[inline]
pub(crate) fn reset(&mut self) {
for v in self.0.values_mut() {
*v = 0;
}
}
}

View File

@@ -1,116 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Indexes, OutputType, StoredU64, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
WithAddrTypes,
},
outputs,
};
use super::state::AddrTypeToReusedAddrUseCount;
/// Per-block reused-address-use metrics. A "use" is a single output going
/// to an address (not deduplicated): an address receiving N outputs in one
/// block contributes N. The count only includes uses going to addresses
/// that were *already* reused at the moment of the use, so the use that
/// makes an address reused is not itself counted.
///
/// The denominator for the percent (per-type and aggregate address-output
/// counts) is read from `outputs::ByTypeVecs::output_count` rather than
/// duplicated here.
#[derive(Traversable)]
pub struct ReusedAddrUsesVecs<M: StorageMode = Rw> {
pub reused_addr_use_count:
WithAddrTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub reused_addr_use_percent: WithAddrTypes<PercentCumulativeRolling<BasisPoints16, M>>,
}
impl ReusedAddrUsesVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let reused_addr_use_count =
WithAddrTypes::<PerBlockCumulativeRolling<StoredU64, StoredU64>>::forced_import(
db,
"reused_addr_use_count",
version,
indexes,
cached_starts,
)?;
let percent_name = "reused_addr_use_percent";
let reused_addr_use_percent = WithAddrTypes {
all: PercentCumulativeRolling::forced_import(db, percent_name, version, indexes)?,
by_addr_type: ByAddrType::new_with_name(|type_name| {
PercentCumulativeRolling::forced_import(
db,
&format!("{type_name}_{percent_name}"),
version,
indexes,
)
})?,
};
Ok(Self {
reused_addr_use_count,
reused_addr_use_percent,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.reused_addr_use_count.min_stateful_len()
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.reused_addr_use_count.par_iter_height_mut()
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.reused_addr_use_count.reset_height()
}
#[inline(always)]
pub(crate) fn push_height(&mut self, reused: &AddrTypeToReusedAddrUseCount) {
self.reused_addr_use_count
.push_height(reused.sum(), reused.values().copied());
}
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
outputs_by_type: &outputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.reused_addr_use_count
.compute_rest(starting_indexes.height, exit)?;
self.reused_addr_use_percent.all.compute_count_ratio(
&self.reused_addr_use_count.all,
&outputs_by_type.output_count.all,
starting_indexes.height,
exit,
)?;
for otype in OutputType::ADDR_TYPES {
self.reused_addr_use_percent
.by_addr_type
.get_mut_unwrap(otype)
.compute_count_ratio(
self.reused_addr_use_count.by_addr_type.get_unwrap(otype),
outputs_by_type.output_count.by_type.get(otype),
starting_indexes.height,
exit,
)?;
}
Ok(())
}
}

View File

@@ -5,7 +5,7 @@ use rustc_hash::FxHashMap;
use crate::distribution::{
addr::{
AddrTypeToActivityCounts, AddrTypeToExposedAddrCount, AddrTypeToExposedAddrSupply,
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount, AddrTypeToVec,
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrEventCount, AddrTypeToVec,
},
cohorts::AddrCohorts,
};
@@ -30,7 +30,8 @@ pub(crate) fn process_received(
activity_counts: &mut AddrTypeToActivityCounts,
reused_addr_count: &mut AddrTypeToReusedAddrCount,
total_reused_addr_count: &mut AddrTypeToReusedAddrCount,
reused_addr_use_count: &mut AddrTypeToReusedAddrUseCount,
output_to_reused_addr_count: &mut AddrTypeToReusedAddrEventCount,
active_reused_addr_count: &mut AddrTypeToReusedAddrEventCount,
exposed_addr_count: &mut AddrTypeToExposedAddrCount,
total_exposed_addr_count: &mut AddrTypeToExposedAddrCount,
exposed_addr_supply: &mut AddrTypeToExposedAddrSupply,
@@ -54,7 +55,9 @@ pub(crate) fn process_received(
let type_activity = activity_counts.get_mut_unwrap(output_type);
let type_reused_count = reused_addr_count.get_mut(output_type).unwrap();
let type_total_reused_count = total_reused_addr_count.get_mut(output_type).unwrap();
let type_reused_use_count = reused_addr_use_count.get_mut(output_type).unwrap();
let type_output_to_reused_count = output_to_reused_addr_count.get_mut(output_type).unwrap();
let type_active_reused_count =
active_reused_addr_count.get_mut(output_type).unwrap();
let type_exposed_count = exposed_addr_count.get_mut(output_type).unwrap();
let type_total_exposed_count = total_exposed_addr_count.get_mut(output_type).unwrap();
let type_exposed_supply = exposed_addr_supply.get_mut(output_type).unwrap();
@@ -168,15 +171,26 @@ pub(crate) fn process_received(
*type_reused_count += 1;
}
// Block-level "active reused address" count: each address
// is processed exactly once here (via aggregation), so we
// count it once iff it is reused after the block's receives.
// The sender-side counterpart in process_sent dedupes
// against `received_addrs` so addresses that did both
// aren't double-counted.
if is_now_reused {
*type_active_reused_count += 1;
}
// Per-block reused-use count: every individual output to this
// address counts iff the address was already reused at the
// moment of that output. With aggregation, that means we
// skip enough outputs at the front to take the lifetime
// funding count from `funded_txo_count_before` past 1, then
// count the rest. `skipped` is `max(0, 2 - before)`.
let skipped = 2u32.saturating_sub(funded_txo_count_before);
// address counts iff, at the moment the output arrives, the
// address had already received at least one prior output
// (i.e. it is an output-level "address reuse event"). With
// aggregation, that means we skip the very first output the
// address ever sees and count every subsequent one, so
// `skipped` is `max(0, 1 - before)`.
let skipped = 1u32.saturating_sub(funded_txo_count_before);
let counted = recv.output_count.saturating_sub(skipped);
*type_reused_use_count += u64::from(counted);
*type_output_to_reused_count += u64::from(counted);
// Update exposed counts. The address's pubkey-exposure state
// is unchanged by a receive (spent_txo_count unchanged), so we

View File

@@ -7,7 +7,7 @@ use vecdb::VecIndex;
use crate::distribution::{
addr::{
AddrTypeToActivityCounts, AddrTypeToExposedAddrCount, AddrTypeToExposedAddrSupply,
AddrTypeToReusedAddrCount, HeightToAddrTypeToVec,
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrEventCount, HeightToAddrTypeToVec,
},
cohorts::AddrCohorts,
compute::PriceRangeMax,
@@ -39,6 +39,8 @@ pub(crate) fn process_sent(
empty_addr_count: &mut ByAddrType<u64>,
activity_counts: &mut AddrTypeToActivityCounts,
reused_addr_count: &mut AddrTypeToReusedAddrCount,
input_from_reused_addr_count: &mut AddrTypeToReusedAddrEventCount,
active_reused_addr_count: &mut AddrTypeToReusedAddrEventCount,
exposed_addr_count: &mut AddrTypeToExposedAddrCount,
total_exposed_addr_count: &mut AddrTypeToExposedAddrCount,
exposed_addr_supply: &mut AddrTypeToExposedAddrSupply,
@@ -65,6 +67,10 @@ pub(crate) fn process_sent(
let type_empty_count = empty_addr_count.get_mut(output_type).unwrap();
let type_activity = activity_counts.get_mut_unwrap(output_type);
let type_reused_count = reused_addr_count.get_mut(output_type).unwrap();
let type_input_from_reused_count =
input_from_reused_addr_count.get_mut(output_type).unwrap();
let type_active_reused_count =
active_reused_addr_count.get_mut(output_type).unwrap();
let type_exposed_count = exposed_addr_count.get_mut(output_type).unwrap();
let type_total_exposed_count = total_exposed_addr_count.get_mut(output_type).unwrap();
let type_exposed_supply = exposed_addr_supply.get_mut(output_type).unwrap();
@@ -74,6 +80,15 @@ pub(crate) fn process_sent(
for (type_index, value) in vec {
let addr_data = lookup.get_for_send(output_type, type_index);
// "Input from a reused address" event: the sending
// address is in the reused set (lifetime
// funded_txo_count > 1). Checked once per input. The
// spend itself doesn't touch funded_txo_count so the
// predicate is stable before/after `cohort_state.send`.
if addr_data.is_reused() {
*type_input_from_reused_count += 1;
}
let prev_balance = addr_data.balance();
let new_balance = prev_balance.checked_sub(value).unwrap();
@@ -81,9 +96,20 @@ pub(crate) fn process_sent(
if type_seen.insert(type_index) {
type_activity.sending += 1;
// Track "both" - addresses that sent AND received this block
if type_received.is_some_and(|s| s.contains(&type_index)) {
type_activity.both += 1;
let also_received =
type_received.is_some_and(|s| s.contains(&type_index));
// Track "bidirectional": addresses that sent AND
// received this block.
if also_received {
type_activity.bidirectional += 1;
}
// Block-level "active reused address" count: count
// every distinct sender that's reused, but skip
// those that also received this block (already
// counted in process_received).
if !also_received && addr_data.is_reused() {
*type_active_reused_count += 1;
}
}

View File

@@ -13,7 +13,8 @@ use crate::{
distribution::{
addr::{
AddrTypeToActivityCounts, AddrTypeToAddrCount, AddrTypeToExposedAddrCount,
AddrTypeToExposedAddrSupply, AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount,
AddrTypeToExposedAddrSupply, AddrTypeToReusedAddrCount,
AddrTypeToReusedAddrEventCount,
},
block::{
AddrCache, InputsResult, process_inputs, process_outputs, process_received,
@@ -228,8 +229,16 @@ pub(crate) fn process_blocks(
// Track activity counts - reset each block
let mut activity_counts = AddrTypeToActivityCounts::default();
// Reused-use count - per-block flow, reset each block
let mut reused_addr_use_counts = AddrTypeToReusedAddrUseCount::default();
// Reused-addr event counts (receive + spend side). Per-block
// flow, reset each block.
let mut output_to_reused_addr_counts = AddrTypeToReusedAddrEventCount::default();
let mut input_from_reused_addr_counts = AddrTypeToReusedAddrEventCount::default();
// Distinct addresses active this block whose lifetime
// funded_txo_count > 1 after this block's events. Incremented in
// process_received for every receiver that ends up reused, and in
// process_sent for every sender that's reused AND didn't also
// receive this block (deduped via `received_addrs`).
let mut active_reused_addr_counts = AddrTypeToReusedAddrEventCount::default();
debug!("creating AddrCache");
let mut cache = AddrCache::new();
@@ -302,7 +311,9 @@ pub(crate) fn process_blocks(
// Reset per-block activity counts
activity_counts.reset();
reused_addr_use_counts.reset();
output_to_reused_addr_counts.reset();
input_from_reused_addr_counts.reset();
active_reused_addr_counts.reset();
// Process outputs, inputs, and tick-tock in parallel via rayon::join.
// Collection (build tx_index mappings + bulk mmap reads) is merged into the
@@ -474,7 +485,8 @@ pub(crate) fn process_blocks(
&mut activity_counts,
&mut reused_addr_counts,
&mut total_reused_addr_counts,
&mut reused_addr_use_counts,
&mut output_to_reused_addr_counts,
&mut active_reused_addr_counts,
&mut exposed_addr_counts,
&mut total_exposed_addr_counts,
&mut exposed_addr_supply,
@@ -491,6 +503,8 @@ pub(crate) fn process_blocks(
&mut empty_addr_counts,
&mut activity_counts,
&mut reused_addr_counts,
&mut input_from_reused_addr_counts,
&mut active_reused_addr_counts,
&mut exposed_addr_counts,
&mut total_exposed_addr_counts,
&mut exposed_addr_supply,
@@ -524,7 +538,16 @@ pub(crate) fn process_blocks(
total_reused_addr_counts.sum(),
total_reused_addr_counts.values().copied(),
);
vecs.addrs.reused.uses.push_height(&reused_addr_use_counts);
let activity_totals = activity_counts.totals();
let active_addr_count = activity_totals.sending + activity_totals.receiving
- activity_totals.bidirectional;
let active_reused = u32::try_from(active_reused_addr_counts.sum()).unwrap();
vecs.addrs.reused.events.push_height(
&output_to_reused_addr_counts,
&input_from_reused_addr_counts,
active_addr_count,
active_reused,
);
vecs.addrs.exposed.count.funded.push_height(
exposed_addr_counts.sum(),
exposed_addr_counts.values().copied(),
@@ -609,7 +632,7 @@ fn push_cohort_states(
height: Height,
height_price: Cents,
) {
// Phase 1: push + unrealized (no reset yet states still needed for aggregation)
// Phase 1: push + unrealized (no reset yet; states still needed for aggregation)
rayon::join(
|| {
utxo_cohorts.par_iter_separate_mut().for_each(|v| {

View File

@@ -38,7 +38,7 @@ use super::{
},
};
const VERSION: Version = Version::new(22);
const VERSION: Version = Version::new(23);
#[derive(Traversable)]
pub struct AddrMetricsVecs<M: StorageMode = Rw> {
@@ -151,7 +151,7 @@ impl Vecs {
let empty_addr_count =
AddrCountsVecs::forced_import(&db, "empty_addr_count", version, indexes)?;
let addr_activity =
AddrActivityVecs::forced_import(&db, "addr_activity", version, indexes, cached_starts)?;
AddrActivityVecs::forced_import(&db, version, indexes, cached_starts)?;
// Stored total = addr_count + empty_addr_count (global + per-type, with all derived indexes)
let total_addr_count = TotalAddrCountVecs::forced_import(&db, version, indexes)?;
@@ -470,9 +470,12 @@ impl Vecs {
// 6b. Compute address count sum (by addr_type -> all)
self.addrs.funded.compute_rest(starting_indexes, exit)?;
self.addrs.empty.compute_rest(starting_indexes, exit)?;
self.addrs
.reused
.compute_rest(starting_indexes, &outputs.by_type, exit)?;
self.addrs.reused.compute_rest(
starting_indexes,
&outputs.by_type,
&inputs.by_type,
exit,
)?;
self.addrs
.exposed
.compute_rest(starting_indexes, prices, exit)?;

View File

@@ -90,7 +90,7 @@ impl Vecs {
let supply_total_sats = &all_metrics.supply.total.sats.height;
// Supply-Adjusted CDD = sum_24h(CDD) / circulating_supply_btc
self.coindays_destroyed_supply_adjusted
self.coindays_destroyed_supply_adj
.height
.compute_transform2(
starting_indexes.height,
@@ -108,7 +108,7 @@ impl Vecs {
)?;
// Supply-Adjusted CYD = CYD / circulating_supply_btc
self.coinyears_destroyed_supply_adjusted
self.coinyears_destroyed_supply_adj
.height
.compute_transform2(
starting_indexes.height,
@@ -126,7 +126,7 @@ impl Vecs {
)?;
// Supply-Adjusted Dormancy = dormancy / circulating_supply_btc
self.dormancy.supply_adjusted.height.compute_transform2(
self.dormancy.supply_adj.height.compute_transform2(
starting_indexes.height,
&all_activity.dormancy._24h.height,
supply_total_sats,

View File

@@ -29,12 +29,12 @@ impl Vecs {
let rhodl_ratio = RatioPerBlock::forced_import_raw(&db, "rhodl_ratio", v, indexes)?;
let thermo_cap_multiple =
RatioPerBlock::forced_import_raw(&db, "thermo_cap_multiple", v, indexes)?;
let coindays_destroyed_supply_adjusted =
PerBlock::forced_import(&db, "coindays_destroyed_supply_adjusted", v, indexes)?;
let coinyears_destroyed_supply_adjusted =
PerBlock::forced_import(&db, "coinyears_destroyed_supply_adjusted", v, indexes)?;
let coindays_destroyed_supply_adj =
PerBlock::forced_import(&db, "coindays_destroyed_supply_adj", v, indexes)?;
let coinyears_destroyed_supply_adj =
PerBlock::forced_import(&db, "coinyears_destroyed_supply_adj", v, indexes)?;
let dormancy = super::vecs::DormancyVecs {
supply_adjusted: PerBlock::forced_import(&db, "dormancy_supply_adjusted", v, indexes)?,
supply_adj: PerBlock::forced_import(&db, "dormancy_supply_adj", v, indexes)?,
flow: PerBlock::forced_import(&db, "dormancy_flow", v, indexes)?,
};
let stock_to_flow = PerBlock::forced_import(&db, "stock_to_flow", v, indexes)?;
@@ -49,8 +49,8 @@ impl Vecs {
gini,
rhodl_ratio,
thermo_cap_multiple,
coindays_destroyed_supply_adjusted,
coinyears_destroyed_supply_adjusted,
coindays_destroyed_supply_adj,
coinyears_destroyed_supply_adj,
dormancy,
stock_to_flow,
seller_exhaustion,

View File

@@ -7,7 +7,7 @@ use crate::internal::{PerBlock, PercentPerBlock, RatioPerBlock};
#[derive(Traversable)]
pub struct DormancyVecs<M: StorageMode = Rw> {
pub supply_adjusted: PerBlock<StoredF32, M>,
pub supply_adj: PerBlock<StoredF32, M>,
pub flow: PerBlock<StoredF32, M>,
}
@@ -20,8 +20,8 @@ pub struct Vecs<M: StorageMode = Rw> {
pub gini: PercentPerBlock<BasisPoints16, M>,
pub rhodl_ratio: RatioPerBlock<BasisPoints32, M>,
pub thermo_cap_multiple: RatioPerBlock<BasisPoints32, M>,
pub coindays_destroyed_supply_adjusted: PerBlock<StoredF32, M>,
pub coinyears_destroyed_supply_adjusted: PerBlock<StoredF32, M>,
pub coindays_destroyed_supply_adj: PerBlock<StoredF32, M>,
pub coinyears_destroyed_supply_adj: PerBlock<StoredF32, M>,
pub dormancy: DormancyVecs<M>,
pub stock_to_flow: PerBlock<StoredF32, M>,
pub seller_exhaustion: PerBlock<StoredF32, M>,

View File

@@ -85,8 +85,17 @@ impl Vecs {
.compute_rest(starting_indexes.height, exit)?;
}
for (otype, source) in self.input_count.by_type.iter_typed() {
self.input_share.get_mut(otype).compute_count_ratio(
source,
&self.input_count.all,
starting_indexes.height,
exit,
)?;
}
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
self.tx_share.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,

View File

@@ -39,18 +39,28 @@ impl Vecs {
cached_starts,
)?;
let tx_percent = SpendableType::try_new(|_, name| {
let input_share = SpendableType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_percent_with_{name}_prevout"),
&format!("{name}_prevout_share"),
version,
indexes,
)
})?;
let tx_share = SpendableType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_share_with_{name}_prevout"),
version,
indexes,
)
})?;
Ok(Self {
input_count,
input_share,
tx_count,
tx_percent,
tx_share,
})
}
}

View File

@@ -9,6 +9,7 @@ use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub input_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub input_share: SpendableType<PercentCumulativeRolling<BasisPoints16, M>>,
pub tx_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: SpendableType<PercentCumulativeRolling<BasisPoints16, M>>,
pub tx_share: SpendableType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,7 +1,12 @@
//! PerBlock with rolling average (no distribution stats).
//!
//! Stored height data + f64 cumulative + lazy 4-window rolling averages.
//! Stored height data + cumulative + lazy 4-window rolling averages.
//! Rolling averages are computed on-the-fly from the cumulative via DeltaAvg.
//!
//! Type parameters:
//! - `T`: per-block value type
//! - `C`: cumulative type, defaults to `T`. Use a wider type (e.g., `StoredU64`)
//! when the prefix sum of `T` values could overflow `T`.
use brk_error::Result;
@@ -15,20 +20,22 @@ use crate::indexes;
use crate::internal::{LazyRollingAvgsFromHeight, NumericValue, WindowStartVec, Windows};
#[derive(Traversable)]
pub struct PerBlockRollingAverage<T, M: StorageMode = Rw>
pub struct PerBlockRollingAverage<T, C = T, M: StorageMode = Rw>
where
T: NumericValue + JsonSchema,
C: NumericValue + JsonSchema,
{
pub block: M::Stored<EagerVec<PcoVec<Height, T>>>,
#[traversable(hidden)]
pub cumulative: M::Stored<EagerVec<PcoVec<Height, T>>>,
pub cumulative: M::Stored<EagerVec<PcoVec<Height, C>>>,
#[traversable(flatten)]
pub average: LazyRollingAvgsFromHeight<T>,
pub average: LazyRollingAvgsFromHeight<C>,
}
impl<T> PerBlockRollingAverage<T>
impl<T, C> PerBlockRollingAverage<T, C>
where
T: NumericValue + JsonSchema,
T: NumericValue + JsonSchema + Into<C>,
C: NumericValue + JsonSchema,
{
pub(crate) fn forced_import(
db: &Database,
@@ -38,11 +45,11 @@ where
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let block: EagerVec<PcoVec<Height, T>> = EagerVec::forced_import(db, name, version)?;
let cumulative: EagerVec<PcoVec<Height, T>> =
EagerVec::forced_import(db, &format!("{name}_cumulative"), version + Version::ONE)?;
let cumulative: EagerVec<PcoVec<Height, C>> =
EagerVec::forced_import(db, &format!("{name}_cumulative"), version + Version::TWO)?;
let average = LazyRollingAvgsFromHeight::new(
&format!("{name}_average"),
version + Version::ONE,
version + Version::TWO,
&cumulative,
cached_starts,
indexes,

View File

@@ -1,7 +1,7 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use brk_types::{Indexes, OutputType, StoredU64};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{Vecs, WithOutputTypes};
use crate::internal::{CoinbasePolicy, PerBlockCumulativeRolling, walk_blocks};
@@ -20,18 +20,25 @@ impl Vecs {
self.output_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
self.spendable_output_count
.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
self.tx_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
let skip = self
.output_count
.min_stateful_len()
.min(self.spendable_output_count.block.len())
.min(self.tx_count.min_stateful_len());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip < end {
self.output_count.truncate_if_needed_at(skip)?;
self.spendable_output_count
.block
.truncate_if_needed_at(skip)?;
self.tx_count.truncate_if_needed_at(skip)?;
let fi_batch = first_tx_index.collect_range_at(skip, end);
@@ -63,10 +70,16 @@ impl Vecs {
|agg| {
push_block(&mut self.output_count, agg.entries_all, &agg.entries_per_type);
push_block(&mut self.tx_count, agg.txs_all, &agg.txs_per_type);
let spendable_total = agg.entries_all
- agg.entries_per_type[OutputType::OpReturn as usize];
self.spendable_output_count
.block
.push(StoredU64::from(spendable_total));
if self.output_count.all.block.batch_limit_reached() {
let _lock = exit.lock();
self.output_count.write()?;
self.spendable_output_count.block.write()?;
self.tx_count.write()?;
}
Ok(())
@@ -76,17 +89,29 @@ impl Vecs {
{
let _lock = exit.lock();
self.output_count.write()?;
self.spendable_output_count.block.write()?;
self.tx_count.write()?;
}
self.output_count
.compute_rest(starting_indexes.height, exit)?;
self.spendable_output_count
.compute_rest(starting_indexes.height, exit)?;
self.tx_count
.compute_rest(starting_indexes.height, exit)?;
}
for (otype, source) in self.output_count.by_type.iter_typed() {
self.output_share.get_mut(otype).compute_count_ratio(
source,
&self.output_count.all,
starting_indexes.height,
exit,
)?;
}
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
self.tx_share.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,

View File

@@ -6,9 +6,7 @@ use vecdb::Database;
use super::{Vecs, WithOutputTypes};
use crate::{
indexes,
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
},
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
@@ -39,18 +37,37 @@ impl Vecs {
cached_starts,
)?;
let tx_percent = ByType::try_new(|_, name| {
let spendable_output_count = PerBlockCumulativeRolling::forced_import(
db,
"spendable_output_count",
version,
indexes,
cached_starts,
)?;
let output_share = ByType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_percent_with_{name}_output"),
&format!("{name}_output_share"),
version,
indexes,
)
})?;
let tx_share = ByType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_share_with_{name}_output"),
version,
indexes,
)
})?;
Ok(Self {
output_count,
spendable_output_count,
output_share,
tx_count,
tx_percent,
tx_share,
})
}
}

View File

@@ -9,6 +9,8 @@ use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub output_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub spendable_output_count: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub output_share: ByType<PercentCumulativeRolling<BasisPoints16, M>>,
pub tx_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: ByType<PercentCumulativeRolling<BasisPoints16, M>>,
pub tx_share: ByType<PercentCumulativeRolling<BasisPoints16, M>>,
}