global: big snapshot part 2

This commit is contained in:
nym21
2026-04-13 22:47:08 +02:00
parent 765261648d
commit 283baca848
93 changed files with 3242 additions and 3067 deletions

View File

@@ -4,10 +4,10 @@ use derive_more::{Deref, DerefMut};
use crate::{
indexes,
internal::{LazyRollingDeltasFromHeight, WindowStartVec, Windows},
internal::{LazyRollingDeltasFromHeight, WindowStartVec, Windows, WithAddrTypes},
};
use super::{AddrCountsVecs, WithAddrTypes};
use super::AddrCountsVecs;
type AddrDelta = LazyRollingDeltasFromHeight<StoredU64, StoredI64, BasisPointsSigned32>;

View File

@@ -5,9 +5,8 @@ use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{PerBlock, WithAddrTypes},
};
/// Exposed address count (`all` + per-type) for a single variant (funded or total).

View File

@@ -45,7 +45,7 @@ use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::indexes;
use crate::{indexes, prices};
/// Top-level container for all exposed address tracking: counts (funded +
/// total) plus the funded supply.
@@ -87,9 +87,15 @@ impl ExposedAddrVecs {
Ok(())
}
pub(crate) fn compute_rest(&mut self, starting_indexes: &Indexes, exit: &Exit) -> Result<()> {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
prices: &prices::Vecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.supply.compute_rest(starting_indexes, exit)?;
self.supply
.compute_rest(starting_indexes.height, prices, exit)?;
Ok(())
}
}

View File

@@ -1,9 +1,9 @@
use brk_cohort::ByAddrType;
use brk_types::{Height, Sats};
use brk_types::Height;
use derive_more::{Deref, DerefMut};
use vecdb::ReadableVec;
use crate::internal::PerBlock;
use crate::internal::AmountPerBlock;
use super::vecs::ExposedAddrSupplyVecs;
@@ -23,8 +23,8 @@ impl From<(&ExposedAddrSupplyVecs, Height)> for AddrTypeToExposedAddrSupply {
#[inline]
fn from((vecs, starting_height): (&ExposedAddrSupplyVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
let read = |v: &PerBlock<Sats>| -> u64 {
u64::from(v.height.collect_one(prev_height).unwrap())
let read = |v: &AmountPerBlock| -> u64 {
u64::from(v.sats.height.collect_one(prev_height).unwrap())
};
Self(ByAddrType {
p2pk65: read(&vecs.by_addr_type.p2pk65),

View File

@@ -1,20 +1,21 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Sats, Version};
use brk_types::Version;
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{AmountPerBlock, WithAddrTypes},
};
/// Exposed address supply (sats) — `all` + per-address-type. Tracks the total
/// balance held by addresses currently in the funded exposed set.
/// Exposed address supply (sats/btc/cents/usd) — `all` + per-address-type.
/// Tracks the total balance held by addresses currently in the funded
/// exposed set. Sats are pushed stateful per block; cents/usd are derived
/// post-hoc from sats × spot price.
#[derive(Deref, DerefMut, Traversable)]
pub struct ExposedAddrSupplyVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<Sats, M>>,
#[traversable(flatten)] pub WithAddrTypes<AmountPerBlock<M>>,
);
impl ExposedAddrSupplyVecs {
@@ -23,7 +24,7 @@ impl ExposedAddrSupplyVecs {
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(WithAddrTypes::<PerBlock<Sats>>::forced_import(
Ok(Self(WithAddrTypes::<AmountPerBlock>::forced_import(
db,
"exposed_addr_supply",
version,

View File

@@ -8,7 +8,6 @@ mod new_addr_count;
mod reused;
mod total_addr_count;
mod type_map;
mod with_addr_types;
pub use activity::{AddrActivityVecs, AddrTypeToActivityCounts};
pub use addr_count::{AddrCountsVecs, AddrTypeToAddrCount};
@@ -24,4 +23,3 @@ pub use reused::{
};
pub use total_addr_count::TotalAddrCountVecs;
pub use type_map::{AddrTypeToTypeIndexMap, AddrTypeToVec, HeightToAddrTypeToVec};
pub use with_addr_types::WithAddrTypes;

View File

@@ -6,10 +6,10 @@ use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows},
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows, WithAddrTypes},
};
use super::{TotalAddrCountVecs, WithAddrTypes};
use super::TotalAddrCountVecs;
/// New address count per block (global + per-type).
#[derive(Deref, DerefMut, Traversable)]

View File

@@ -5,9 +5,8 @@ use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{PerBlock, WithAddrTypes},
};
/// Reused address count (`all` + per-type) for a single variant (funded or total).

View File

@@ -10,7 +10,7 @@
//! an aggregated `all`.
//! - [`uses`] — per-block count of outputs going to addresses that were
//! already reused, plus the derived percent over total address-output
//! count (denominator from `scripts::count`).
//! count (denominator from `outputs::by_type`).
mod count;
mod uses;
@@ -27,7 +27,7 @@ use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{WindowStartVec, Windows},
scripts,
outputs,
};
/// Top-level container for all reused address tracking: counts (funded +
@@ -74,12 +74,12 @@ impl ReusedAddrVecs {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
outputs_by_type: &outputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.uses
.compute_rest(starting_indexes, scripts_count, exit)?;
.compute_rest(starting_indexes, outputs_by_type, exit)?;
Ok(())
}
}

View File

@@ -1,17 +1,17 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, Version};
use brk_types::{BasisPoints16, Indexes, OutputType, StoredU64, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, RatioU64Bp16, WindowStartVec, Windows,
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
WithAddrTypes,
},
scripts,
outputs,
};
use super::state::AddrTypeToReusedAddrUseCount;
@@ -22,8 +22,9 @@ use super::state::AddrTypeToReusedAddrUseCount;
/// that were *already* reused at the moment of the use, so the use that
/// makes an address reused is not itself counted.
///
/// The denominator for the percent (total address-output count) lives in
/// `scripts::count` and is reused here rather than duplicated.
/// The denominator for the percent (per-type and aggregate address-output
/// counts) is read from `outputs::ByTypeVecs::output_count` rather than
/// duplicated here.
#[derive(Traversable)]
pub struct ReusedAddrUsesVecs<M: StorageMode = Rw> {
pub reused_addr_use_count:
@@ -87,66 +88,29 @@ impl ReusedAddrUsesVecs {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
outputs_by_type: &outputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.reused_addr_use_count
.compute_rest(starting_indexes.height, exit)?;
compute_one_percent(
&mut self.reused_addr_use_percent.all,
self.reused_addr_use_percent.all.compute_count_ratio(
&self.reused_addr_use_count.all,
&scripts_count.addr_output_count,
&outputs_by_type.output_count.all,
starting_indexes.height,
exit,
)?;
for otype in OutputType::ADDR_TYPES {
compute_one_percent(
self.reused_addr_use_percent
.by_addr_type
.get_mut_unwrap(otype),
self.reused_addr_use_count.by_addr_type.get_unwrap(otype),
denom_for_type(scripts_count, otype),
starting_indexes.height,
exit,
)?;
self.reused_addr_use_percent
.by_addr_type
.get_mut_unwrap(otype)
.compute_count_ratio(
self.reused_addr_use_count.by_addr_type.get_unwrap(otype),
outputs_by_type.output_count.by_type.get(otype),
starting_indexes.height,
exit,
)?;
}
Ok(())
}
}
#[inline]
fn compute_one_percent(
percent: &mut PercentCumulativeRolling<BasisPoints16>,
reused: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
denom: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
percent.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&reused.cumulative.height,
&denom.cumulative.height,
reused.sum.as_array().map(|w| &w.height),
denom.sum.as_array().map(|w| &w.height),
exit,
)
}
#[inline]
fn denom_for_type(
scripts_count: &scripts::CountVecs,
otype: OutputType,
) -> &PerBlockCumulativeRolling<StoredU64, StoredU64> {
match otype {
OutputType::P2PK33 => &scripts_count.p2pk33,
OutputType::P2PK65 => &scripts_count.p2pk65,
OutputType::P2PKH => &scripts_count.p2pkh,
OutputType::P2SH => &scripts_count.p2sh,
OutputType::P2WPKH => &scripts_count.p2wpkh,
OutputType::P2WSH => &scripts_count.p2wsh,
OutputType::P2TR => &scripts_count.p2tr,
OutputType::P2A => &scripts_count.p2a,
_ => unreachable!("OutputType::ADDR_TYPES contains only address types"),
}
}

View File

@@ -4,9 +4,12 @@ use brk_types::{Height, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{indexes, internal::PerBlock};
use crate::{
indexes,
internal::{PerBlock, WithAddrTypes},
};
use super::{AddrCountsVecs, WithAddrTypes};
use super::AddrCountsVecs;
/// Total address count (global + per-type) with all derived indexes.
#[derive(Deref, DerefMut, Traversable)]

View File

@@ -27,7 +27,7 @@ use crate::{
PerBlockCumulativeRolling, WindowStartVec, Windows,
db_utils::{finalize_db, open_db},
},
outputs, prices, scripts, transactions,
outputs, prices, transactions,
};
use super::{
@@ -235,7 +235,6 @@ impl Vecs {
indexes: &indexes::Vecs,
inputs: &inputs::Vecs,
outputs: &outputs::Vecs,
scripts: &scripts::Vecs,
transactions: &transactions::Vecs,
blocks: &blocks::Vecs,
prices: &prices::Vecs,
@@ -473,8 +472,10 @@ impl Vecs {
self.addrs.empty.compute_rest(starting_indexes, exit)?;
self.addrs
.reused
.compute_rest(starting_indexes, &scripts.count, exit)?;
self.addrs.exposed.compute_rest(starting_indexes, exit)?;
.compute_rest(starting_indexes, &outputs.by_type, exit)?;
self.addrs
.exposed
.compute_rest(starting_indexes, prices, exit)?;
// 6c. Compute total_addr_count = addr_count + empty_addr_count
self.addrs.total.compute(

View File

@@ -3,15 +3,11 @@ use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
use super::{Vecs, WithInputTypes};
use crate::internal::{CoinbasePolicy, PerBlockCumulativeRolling, walk_blocks};
impl Vecs {
/// Phase 1: walk inputs and populate `input_count` + `tx_count`.
/// Independent of transactions, can run alongside other inputs work.
pub(crate) fn compute_counts(
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
@@ -22,83 +18,93 @@ impl Vecs {
+ indexer.vecs.transactions.first_txin_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.input_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
self.input_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
self.tx_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
let skip = self
.input_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
.min_stateful_len()
.min(self.tx_count.min_stateful_len());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
if skip < end {
self.input_count.truncate_if_needed_at(skip)?;
self.tx_count.truncate_if_needed_at(skip)?;
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txin_len = indexer.vecs.inputs.output_type.len();
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
walk_blocks(
&fi_batch,
txid_len,
CoinbasePolicy::Skip,
|tx_pos, per_tx| {
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txin_len
};
itype_cursor.advance(fi_in - itype_cursor.position());
for _ in fi_in..next_fi_in {
let otype = itype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
|agg| {
push_block(&mut self.input_count, agg.entries_all, &agg.entries_per_type);
push_block(&mut self.tx_count, agg.txs_all, &agg.txs_per_type);
if self.input_count.all.block.batch_limit_reached() {
let _lock = exit.lock();
self.input_count.write()?;
self.tx_count.write()?;
}
Ok(())
},
)?;
{
let _lock = exit.lock();
self.input_count.write()?;
self.tx_count.write()?;
}
self.input_count
.compute_rest(starting_indexes.height, exit)?;
self.tx_count
.compute_rest(starting_indexes.height, exit)?;
}
for (_, v) in self.input_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,
exit,
)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txin_len = indexer.vecs.inputs.output_type.len();
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
compute_by_addr_type_block_counts(
&mut self.input_count,
&mut self.tx_count,
&fi_batch,
txid_len,
true, // skip coinbase (1 fake input)
starting_indexes.height,
exit,
|tx_pos, per_tx| {
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txin_len
};
itype_cursor.advance(fi_in - itype_cursor.position());
for _ in fi_in..next_fi_in {
let otype = itype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
)
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute`.
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
exit,
)
Ok(())
}
}
#[inline]
fn push_block(
metric: &mut WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
total: u64,
per_type: &[u64; 12],
) {
metric.all.block.push(StoredU64::from(total));
for (otype, vec) in metric.by_type.iter_typed_mut() {
vec.block.push(StoredU64::from(per_type[otype as usize]));
}
}

View File

@@ -1,12 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::SpendableType;
use brk_error::Result;
use brk_types::Version;
use brk_types::{StoredU64, Version};
use vecdb::Database;
use super::Vecs;
use super::{Vecs, WithInputTypes};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
},
};
impl Vecs {
@@ -16,33 +18,39 @@ impl Vecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let input_count = WithInputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
"input_count_bis",
|t| format!("{t}_prevout_count"),
version,
indexes,
cached_starts,
)?;
let tx_count = WithInputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
"non_coinbase_tx_count",
|t| format!("tx_count_with_{t}_prevout"),
version,
indexes,
cached_starts,
)?;
let tx_percent = SpendableType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_percent_with_{name}_prevout"),
version,
indexes,
)
})?;
Ok(Self {
input_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{name}_input_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in_rel_to_all"),
version,
indexes,
)
})?,
input_count,
tx_count,
tx_percent,
})
}
}

View File

@@ -1,5 +1,7 @@
mod compute;
mod import;
mod vecs;
mod with_input_types;
pub use vecs::Vecs;
pub(crate) use with_input_types::WithInputTypes;

View File

@@ -1,18 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::SpendableType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use super::WithInputTypes;
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total input count (granular). The "type" is the
/// type of the spent output that the input consumes.
pub input_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one input that
/// spends an output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
pub input_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: SpendableType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,93 @@
//! Generic `all` + per-input-type container (11 spendable types — no
//! op_return since op_return outputs are non-spendable). Used by
//! `inputs/by_type/`. Mirrors `WithAddrTypes` and `WithOutputTypes`.
use brk_cohort::SpendableType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlockCumulativeRolling, WindowStartVec, Windows},
};
/// `all` aggregate plus per-input-type breakdown across the 11 spendable
/// output types (everything except op_return). The "type" of an input is
/// the type of the previous output it spends.
#[derive(Clone, Traversable)]
pub struct WithInputTypes<T> {
pub all: T,
#[traversable(flatten)]
pub by_type: SpendableType<T>,
}
impl<T, C> WithInputTypes<PerBlockCumulativeRolling<T, C>>
where
T: NumericValue + JsonSchema + Into<C>,
C: NumericValue + JsonSchema,
{
pub(crate) fn forced_import_with(
db: &Database,
all_name: &str,
per_type_name: impl Fn(&str) -> String,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let make = |name: &str| {
PerBlockCumulativeRolling::forced_import(db, name, version, indexes, cached_starts)
};
Ok(Self {
all: make(all_name)?,
by_type: SpendableType::try_new(|_, name| make(&per_type_name(name)))?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.by_type
.iter()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.all.block.len())
}
pub(crate) fn write(&mut self) -> Result<()> {
self.all.block.write()?;
for v in self.by_type.iter_mut() {
v.block.write()?;
}
Ok(())
}
pub(crate) fn validate_and_truncate(
&mut self,
dep_version: Version,
at_height: Height,
) -> Result<()> {
self.all.block.validate_and_truncate(dep_version, at_height)?;
for v in self.by_type.iter_mut() {
v.block.validate_and_truncate(dep_version, at_height)?;
}
Ok(())
}
pub(crate) fn truncate_if_needed_at(&mut self, len: usize) -> Result<()> {
self.all.block.truncate_if_needed_at(len)?;
for v in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(len)?;
}
Ok(())
}
pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
self.all.compute_rest(max_from, exit)?;
for v in self.by_type.iter_mut() {
v.compute_rest(max_from, exit)?;
}
Ok(())
}
}

View File

@@ -20,6 +20,9 @@ impl Vecs {
self.spent.compute(indexer, starting_indexes, exit)?;
self.count
.compute(indexer, indexes, blocks, starting_indexes, exit)?;
self.per_sec
.compute(&self.count, starting_indexes, exit)?;
self.by_type.compute(indexer, starting_indexes, exit)?;
let exit = exit.clone();
self.db.run_bg(move |db| {

View File

@@ -11,7 +11,7 @@ use crate::{
},
};
use super::{CountVecs, SpentVecs, Vecs};
use super::{ByTypeVecs, CountVecs, PerSecVecs, SpentVecs, Vecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -25,8 +25,16 @@ impl Vecs {
let spent = SpentVecs::forced_import(&db, version)?;
let count = CountVecs::forced_import(&db, version, indexes, cached_starts)?;
let per_sec = PerSecVecs::forced_import(&db, version, indexes)?;
let by_type = ByTypeVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self { db, spent, count };
let this = Self {
db,
spent,
count,
per_sec,
by_type,
};
finalize_db(&this.db, &this)?;
Ok(this)
}

View File

@@ -1,4 +1,6 @@
pub mod by_type;
pub mod count;
pub mod per_sec;
pub mod spent;
mod compute;
@@ -7,7 +9,9 @@ mod import;
use brk_traversable::Traversable;
use vecdb::{Database, Rw, StorageMode};
pub use by_type::Vecs as ByTypeVecs;
pub use count::Vecs as CountVecs;
pub use per_sec::Vecs as PerSecVecs;
pub use spent::Vecs as SpentVecs;
pub const DB_NAME: &str = "inputs";
@@ -19,4 +23,6 @@ pub struct Vecs<M: StorageMode = Rw> {
pub spent: SpentVecs<M>,
pub count: CountVecs<M>,
pub per_sec: PerSecVecs<M>,
pub by_type: ByTypeVecs<M>,
}

View File

@@ -0,0 +1,28 @@
use brk_error::Result;
use brk_types::{Indexes, StoredF32};
use vecdb::Exit;
use super::Vecs;
use crate::{inputs::CountVecs, internal::Windows};
impl Vecs {
pub(crate) fn compute(
&mut self,
count: &CountVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let h = starting_indexes.height;
let sums = count.rolling.sum.0.as_array();
let per_sec = self.0.as_mut_array();
for (i, &secs) in Windows::<()>::SECS.iter().enumerate() {
per_sec[i].height.compute_transform(
h,
&sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
}
Ok(())
}
}

View File

@@ -0,0 +1,21 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("inputs_per_sec_{suffix}"), version, indexes)
})?))
}
}

View File

@@ -0,0 +1,8 @@
use brk_traversable::Traversable;
use brk_types::StoredF32;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, Windows};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw>(#[traversable(flatten)] pub Windows<PerBlock<StoredF32, M>>);

View File

@@ -0,0 +1,84 @@
//! Shared per-block-per-type cursor walker used by `outputs/by_type/` and
//! `inputs/by_type/`. The walker iterates blocks and aggregates the
//! per-tx output-type counts; pushing into a particular wrapper is left
//! to the caller.
use brk_error::Result;
use brk_types::TxIndex;
use vecdb::VecIndex;
/// Aggregated per-block counters produced by [`walk_blocks`].
pub(crate) struct BlockAggregate {
pub entries_all: u64,
pub entries_per_type: [u64; 12],
pub txs_all: u64,
pub txs_per_type: [u64; 12],
}
/// Whether to include the coinbase tx (first tx in each block) in the walk.
#[derive(Clone, Copy)]
pub(crate) enum CoinbasePolicy {
Include,
Skip,
}
/// Walk every block in `fi_batch`, calling `scan_tx` once per tx (which
/// fills a `[u32; 12]` with the per-output-type count for that tx),
/// aggregating into a [`BlockAggregate`] and handing it to `store`.
///
/// `entries_all` and `txs_all` aggregate over the 12 output types
/// indistinguishably; downstream consumers can cap to the 11 spendable
/// types if op_return is non-applicable.
#[inline]
pub(crate) fn walk_blocks(
fi_batch: &[TxIndex],
txid_len: usize,
coinbase: CoinbasePolicy,
mut scan_tx: impl FnMut(usize, &mut [u32; 12]) -> Result<()>,
mut store: impl FnMut(BlockAggregate) -> Result<()>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = match coinbase {
CoinbasePolicy::Include => fi,
CoinbasePolicy::Skip => fi + 1,
};
let mut entries_per_type = [0u64; 12];
let mut txs_per_type = [0u64; 12];
let mut entries_all = 0u64;
let mut txs_all = 0u64;
for tx_pos in start_tx..next_fi {
let mut per_tx = [0u32; 12];
scan_tx(tx_pos, &mut per_tx)?;
let mut tx_has_any = false;
for (i, &n) in per_tx.iter().enumerate() {
if n > 0 {
entries_per_type[i] += u64::from(n);
txs_per_type[i] += 1;
entries_all += u64::from(n);
tx_has_any = true;
}
}
if tx_has_any {
txs_all += 1;
}
}
store(BlockAggregate {
entries_all,
entries_per_type,
txs_all,
txs_per_type,
})?;
}
Ok(())
}

View File

@@ -1,125 +0,0 @@
//! Shared per-block per-address-type counters.
//!
//! Used by `outputs/by_type/` (counts outputs per type) and `inputs/by_type/`
//! (counts inputs per type). Walks each block's tx range, calls a scanner
//! callback that fills a `[u32; 12]` per-tx counter, and produces two
//! per-block aggregates in a single pass:
//!
//! - `entry_count` — total number of items (outputs / inputs) per type
//! - `tx_count` — number of txs that contain at least one item of each type
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
/// Per-block scan that simultaneously computes:
/// - `entry_count[type] += per_tx[type]` (sum of items)
/// - `tx_count[type] += 1 if per_tx[type] > 0` (presence flag)
///
/// `scan_tx` is called once per tx with a zeroed `[u32; 12]` buffer that
/// it must fill with the per-type item count for that tx.
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_by_addr_type_block_counts(
entry_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
fi_batch: &[TxIndex],
txid_len: usize,
skip_first_tx: bool,
starting_height: Height,
exit: &Exit,
mut scan_tx: impl FnMut(usize, &mut [u32; 12]) -> Result<()>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = if skip_first_tx { fi + 1 } else { fi };
let mut entries_per_block = [0u64; 12];
let mut txs_per_block = [0u64; 12];
for tx_pos in start_tx..next_fi {
let mut per_tx = [0u32; 12];
scan_tx(tx_pos, &mut per_tx)?;
for (i, &n) in per_tx.iter().enumerate() {
if n > 0 {
entries_per_block[i] += u64::from(n);
txs_per_block[i] += 1;
}
}
}
for otype in OutputType::ADDR_TYPES {
let idx = otype as usize;
entry_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(entries_per_block[idx]));
tx_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(txs_per_block[idx]));
}
if entry_count.p2pkh.block.batch_limit_reached() {
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
}
{
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
for (_, v) in entry_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
for (_, v) in tx_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
Ok(())
}
/// Compute per-type tx-count percent over total tx count, for all 8 address types.
pub(crate) fn compute_by_addr_type_tx_percents(
tx_count: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
for otype in OutputType::ADDR_TYPES {
let source = tx_count.get_unwrap(otype);
tx_percent
.get_mut_unwrap(otype)
.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_indexes.height,
&source.cumulative.height,
&count_total.cumulative.height,
source.sum.as_array().map(|w| &w.height),
count_total.rolling.sum.as_array().map(|w| &w.height),
exit,
)?;
}
Ok(())
}

View File

@@ -11,6 +11,12 @@ pub struct Windows<A> {
impl<A> Windows<A> {
pub const SUFFIXES: [&'static str; 4] = ["24h", "1w", "1m", "1y"];
pub const DAYS: [usize; 4] = [1, 7, 30, 365];
pub const SECS: [f64; 4] = [
Self::DAYS[0] as f64 * 86400.0,
Self::DAYS[1] as f64 * 86400.0,
Self::DAYS[2] as f64 * 86400.0,
Self::DAYS[3] as f64 * 86400.0,
];
pub fn try_from_fn<E>(
mut f: impl FnMut(&str) -> std::result::Result<A, E>,

View File

@@ -1,6 +1,6 @@
pub(crate) mod algo;
mod amount;
mod by_type_counts;
mod block_walker;
mod cache_budget;
mod containers;
pub(crate) mod db_utils;
@@ -9,9 +9,10 @@ mod per_block;
mod per_tx;
mod traits;
mod transform;
mod with_addr_types;
pub(crate) use amount::*;
pub(crate) use by_type_counts::*;
pub(crate) use block_walker::*;
pub(crate) use cache_budget::*;
pub(crate) use containers::*;
pub(crate) use indexes::*;
@@ -19,3 +20,4 @@ pub(crate) use per_block::*;
pub(crate) use per_tx::*;
pub(crate) use traits::*;
pub use transform::*;
pub(crate) use with_addr_types::*;

View File

@@ -5,16 +5,17 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use brk_types::{BasisPoints16, Height, StoredU64, Version};
use vecdb::{BinaryTransform, Database, Exit, ReadableVec, Rw, StorageMode, VecValue};
use crate::{
indexes,
internal::{BpsType, PercentPerBlock, PercentRollingWindows},
internal::{BpsType, PerBlockCumulativeRolling, PercentPerBlock, PercentRollingWindows, RatioU64Bp16},
};
#[derive(Traversable)]
pub struct PercentCumulativeRolling<B: BpsType, M: StorageMode = Rw> {
#[traversable(flatten)]
pub cumulative: PercentPerBlock<B, M>,
#[traversable(flatten)]
pub rolling: PercentRollingWindows<B, M>,
@@ -26,26 +27,6 @@ impl<B: BpsType> PercentCumulativeRolling<B> {
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative =
PercentPerBlock::forced_import(db, &format!("{name}_cumulative"), version, indexes)?;
let rolling =
PercentRollingWindows::forced_import(db, &format!("{name}_sum"), version, indexes)?;
Ok(Self {
cumulative,
rolling,
})
}
/// Alternate constructor that uses the same base name for both the
/// cumulative `PercentPerBlock` and the `PercentRollingWindows`, relying on
/// the window suffix to disambiguate. Useful for preserving legacy disk
/// names where the two variants historically shared a prefix.
pub(crate) fn forced_import_flat(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative = PercentPerBlock::forced_import(db, name, version, indexes)?;
let rolling = PercentRollingWindows::forced_import(db, name, version, indexes)?;
@@ -89,3 +70,26 @@ impl<B: BpsType> PercentCumulativeRolling<B> {
Ok(())
}
}
impl PercentCumulativeRolling<BasisPoints16> {
    /// Derive a percent from two `PerBlockCumulativeRolling<StoredU64>`
    /// sources (numerator and denominator). Both sources must already have
    /// their cumulative and rolling sums computed.
    ///
    /// The cumulative percent is taken from the two cumulative height vecs,
    /// and each rolling-window percent from the matching pair of window-sum
    /// height vecs, all through the `RatioU64Bp16` transform (basis points).
    #[inline]
    pub(crate) fn compute_count_ratio(
        &mut self,
        numerator: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
        denominator: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
        starting_height: Height,
        exit: &Exit,
    ) -> Result<()> {
        // `as_array().map(...)` pairs each numerator window with the same
        // denominator window; `exit` is passed through for interruption.
        self.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
            starting_height,
            &numerator.cumulative.height,
            &denominator.cumulative.height,
            numerator.sum.as_array().map(|w| &w.height),
            denominator.sum.as_array().map(|w| &w.height),
            exit,
        )
    }
}

View File

@@ -0,0 +1,38 @@
use brk_traversable::Traversable;
use brk_types::Version;
use vecdb::UnaryTransform;
use crate::internal::{
BpsType, LazyPercentPerBlock, LazyPercentRollingWindows, PercentCumulativeRolling,
};
/// Fully lazy variant of `PercentCumulativeRolling` — no stored vecs.
///
/// Mirrors the flat shape of `PercentCumulativeRolling`: cumulative and
/// rolling window fields are both flattened to the same tree level, so
/// consumers see `{ bps, percent, ratio, _24h, _1w, _1m, _1y }`.
#[derive(Clone, Traversable)]
pub struct LazyPercentCumulativeRolling<B: BpsType> {
    /// Lazily derived cumulative (all-time) percent series.
    #[traversable(flatten)]
    pub cumulative: LazyPercentPerBlock<B>,
    /// Lazily derived rolling-window percent series.
    #[traversable(flatten)]
    pub rolling: LazyPercentRollingWindows<B>,
}
impl<B: BpsType> LazyPercentCumulativeRolling<B> {
    /// Derive from a stored `PercentCumulativeRolling` source via a
    /// BPS-to-BPS unary transform `F`, applied to both the cumulative and
    /// the rolling halves.
    pub(crate) fn from_source<F: UnaryTransform<B, B>>(
        name: &str,
        version: Version,
        source: &PercentCumulativeRolling<B>,
    ) -> Self {
        Self {
            cumulative: LazyPercentPerBlock::from_percent::<F>(name, version, &source.cumulative),
            rolling: LazyPercentRollingWindows::from_rolling::<F>(name, version, &source.rolling),
        }
    }
}

View File

@@ -1,6 +1,7 @@
mod base;
mod cumulative_rolling;
mod lazy;
mod lazy_cumulative_rolling;
mod lazy_windows;
mod vec;
mod windows;
@@ -8,6 +9,7 @@ mod windows;
pub use base::*;
pub use cumulative_rolling::*;
pub use lazy::*;
pub use lazy_cumulative_rolling::*;
pub use lazy_windows::*;
pub use vec::*;
pub use windows::*;

View File

@@ -5,14 +5,15 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Indexes, Version};
use brk_types::{Height, Indexes, Sats, Version};
use rayon::prelude::*;
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, EagerVec, Exit, PcoVec, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlock, PerBlockCumulativeRolling, WindowStartVec, Windows},
use crate::{indexes, prices};
use super::{
AmountPerBlock, NumericValue, PerBlock, PerBlockCumulativeRolling, WindowStartVec, Windows,
};
/// `all` aggregate plus per-`AddrType` breakdown.
@@ -171,3 +172,75 @@ where
Ok(())
}
}
impl WithAddrTypes<AmountPerBlock> {
    /// Open (or create) the `all` vec under `name` and one vec per address
    /// type, each prefixed with its type name (`<type>_<name>`).
    pub(crate) fn forced_import(
        db: &Database,
        name: &str,
        version: Version,
        indexes: &indexes::Vecs,
    ) -> Result<Self> {
        let all = AmountPerBlock::forced_import(db, name, version, indexes)?;
        let by_addr_type = ByAddrType::new_with_name(|type_name| {
            AmountPerBlock::forced_import(db, &format!("{type_name}_{name}"), version, indexes)
        })?;
        Ok(Self { all, by_addr_type })
    }

    /// Shortest stateful sats height-vec length across `all` and every
    /// per-type vec (the safe resume point after a partial write).
    pub(crate) fn min_stateful_len(&self) -> usize {
        self.by_addr_type
            .values()
            .map(|v| v.sats.height.len())
            .min()
            .unwrap()
            .min(self.all.sats.height.len())
    }

    /// Parallel mutable iterator over the stateful sats height vecs
    /// (`all` first, then each per-type vec).
    // NOTE(review): only sats vecs are yielded here, while `reset_height`
    // also touches cents — cents are derived later in `compute_rest`.
    pub(crate) fn par_iter_height_mut(
        &mut self,
    ) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
        rayon::iter::once(&mut self.all.sats.height as &mut dyn AnyStoredVec).chain(
            self.by_addr_type
                .par_values_mut()
                .map(|v| &mut v.sats.height as &mut dyn AnyStoredVec),
        )
    }

    /// Reset both sats and cents height vecs for `all` and every per-type
    /// vec back to empty.
    pub(crate) fn reset_height(&mut self) -> Result<()> {
        self.all.sats.height.reset()?;
        self.all.cents.height.reset()?;
        for v in self.by_addr_type.values_mut() {
            v.sats.height.reset()?;
            v.cents.height.reset()?;
        }
        Ok(())
    }

    /// Push the stateful sats value for `all` and each per-type. Cents are
    /// derived post-hoc from sats × price in [`Self::compute_rest`].
    #[inline(always)]
    pub(crate) fn push_height<U>(&mut self, total: U, per_type: impl IntoIterator<Item = U>)
    where
        U: Into<Sats>,
    {
        self.all.sats.height.push(total.into());
        // Relies on `per_type` yielding values in the same order as
        // `by_addr_type.values_mut()`.
        for (v, value) in self.by_addr_type.values_mut().zip(per_type) {
            v.sats.height.push(value.into());
        }
    }

    /// Derive cents (and thus lazy btc/usd) for `all` and every per-type vec
    /// from the stateful sats values × spot price.
    pub(crate) fn compute_rest(
        &mut self,
        max_from: Height,
        prices: &prices::Vecs,
        exit: &Exit,
    ) -> Result<()> {
        self.all.compute(prices, max_from, exit)?;
        for v in self.by_addr_type.values_mut() {
            v.compute(prices, max_from, exit)?;
        }
        Ok(())
    }
}

View File

@@ -23,7 +23,6 @@ mod mining;
mod outputs;
mod pools;
pub mod prices;
mod scripts;
mod supply;
mod transactions;
@@ -32,7 +31,6 @@ pub struct Computer<M: StorageMode = Rw> {
pub blocks: Box<blocks::Vecs<M>>,
pub mining: Box<mining::Vecs<M>>,
pub transactions: Box<transactions::Vecs<M>>,
pub scripts: Box<scripts::Vecs<M>>,
pub cointime: Box<cointime::Vecs<M>>,
pub constants: Box<constants::Vecs>,
pub indexes: Box<indexes::Vecs<M>>,
@@ -89,8 +87,8 @@ impl Computer {
let cached_starts = blocks.lookback.cached_window_starts();
let (inputs, outputs, mining, transactions, scripts, pools, cointime) = timed(
"Imported inputs/outputs/mining/tx/scripts/pools/cointime",
let (inputs, outputs, mining, transactions, pools, cointime) = timed(
"Imported inputs/outputs/mining/tx/pools/cointime",
|| {
thread::scope(|s| -> Result<_> {
let inputs_handle = big_thread().spawn_scoped(s, || -> Result<_> {
@@ -130,15 +128,6 @@ impl Computer {
)?))
})?;
let scripts_handle = big_thread().spawn_scoped(s, || -> Result<_> {
Ok(Box::new(scripts::Vecs::forced_import(
&computed_path,
VERSION,
&indexes,
&cached_starts,
)?))
})?;
let pools_handle = big_thread().spawn_scoped(s, || -> Result<_> {
Ok(Box::new(pools::Vecs::forced_import(
&computed_path,
@@ -159,18 +148,9 @@ impl Computer {
let outputs = outputs_handle.join().unwrap()?;
let mining = mining_handle.join().unwrap()?;
let transactions = transactions_handle.join().unwrap()?;
let scripts = scripts_handle.join().unwrap()?;
let pools = pools_handle.join().unwrap()?;
Ok((
inputs,
outputs,
mining,
transactions,
scripts,
pools,
cointime,
))
Ok((inputs, outputs, mining, transactions, pools, cointime))
})
},
)?;
@@ -235,7 +215,6 @@ impl Computer {
blocks,
mining,
transactions,
scripts,
constants,
indicators,
investing,
@@ -261,7 +240,6 @@ impl Computer {
blocks::DB_NAME,
mining::DB_NAME,
transactions::DB_NAME,
scripts::DB_NAME,
cointime::DB_NAME,
indicators::DB_NAME,
indexes::DB_NAME,
@@ -340,6 +318,8 @@ impl Computer {
inputs_result?;
prices_result?;
// market, outputs, and (transactions → mining) are pairwise
// independent. Run all three in parallel.
let market = scope.spawn(|| {
timed("Computed market", || {
self.market.compute(
@@ -352,48 +332,44 @@ impl Computer {
})
});
timed("Computed scripts", || {
self.scripts
.compute(indexer, &self.prices, &starting_indexes, exit)
})?;
let tx_mining = scope.spawn(|| -> Result<()> {
timed("Computed transactions", || {
self.transactions.compute(
indexer,
&self.indexes,
&self.blocks,
&self.inputs,
&self.prices,
&starting_indexes,
exit,
)
})?;
timed("Computed mining", || {
self.mining.compute(
indexer,
&self.indexes,
&self.blocks,
&self.transactions,
&self.prices,
&starting_indexes,
exit,
)
})
});
timed("Computed outputs", || {
self.outputs.compute(
indexer,
&self.indexes,
&self.inputs,
&self.scripts,
&self.blocks,
&starting_indexes,
exit,
)
})?;
timed("Computed transactions", || {
self.transactions.compute(
indexer,
&self.indexes,
&self.blocks,
&self.inputs,
&self.outputs,
&self.prices,
&starting_indexes,
exit,
)
})?;
timed("Computed mining", || {
self.mining.compute(
indexer,
&self.indexes,
&self.blocks,
&self.transactions,
&self.prices,
&starting_indexes,
exit,
)
})?;
tx_mining.join().unwrap()?;
market.join().unwrap()?;
Ok(())
})?;
@@ -433,7 +409,6 @@ impl Computer {
&self.indexes,
&self.inputs,
&self.outputs,
&self.scripts,
&self.transactions,
&self.blocks,
&self.prices,
@@ -465,7 +440,7 @@ impl Computer {
timed("Computed supply", || {
self.supply.compute(
&self.scripts,
&self.outputs,
&self.blocks,
&self.mining,
&self.transactions,
@@ -535,7 +510,6 @@ impl_iter_named!(
blocks,
mining,
transactions,
scripts,
cointime,
constants,
indicators,

View File

@@ -125,14 +125,6 @@ impl Vecs {
exit,
)?;
self.subsidy_dominance
.compute_binary::<Sats, Sats, RatioSatsBp16>(
starting_indexes.height,
&self.subsidy.cumulative.sats.height,
&self.coinbase.cumulative.sats.height,
exit,
)?;
self.fee_to_subsidy_ratio
.compute_binary::<Dollars, Dollars, RatioDollarsBp32, _, _>(
starting_indexes.height,

View File

@@ -7,8 +7,8 @@ use crate::{
indexes,
internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, OneMinusBp16, PercentCumulativeRolling, PercentPerBlock,
RatioRollingWindows, WindowStartVec, Windows,
LazyPercentCumulativeRolling, OneMinusBp16, PercentCumulativeRolling, RatioRollingWindows,
WindowStartVec, Windows,
},
};
@@ -20,12 +20,12 @@ impl Vecs {
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let fee_dominance =
PercentCumulativeRolling::forced_import_flat(db, "fee_dominance", version, indexes)?;
PercentCumulativeRolling::forced_import(db, "fee_dominance", version, indexes)?;
let subsidy_dominance_rolling = LazyPercentRollingWindows::from_rolling::<OneMinusBp16>(
let subsidy_dominance = LazyPercentCumulativeRolling::from_source::<OneMinusBp16>(
"subsidy_dominance",
version,
&fee_dominance.rolling,
&fee_dominance,
);
Ok(Self {
@@ -52,13 +52,7 @@ impl Vecs {
indexes,
)?,
fee_dominance,
subsidy_dominance: PercentPerBlock::forced_import(
db,
"subsidy_dominance",
version,
indexes,
)?,
subsidy_dominance_rolling,
subsidy_dominance,
fee_to_subsidy_ratio: RatioRollingWindows::forced_import(
db,
"fee_to_subsidy_ratio",

View File

@@ -4,7 +4,7 @@ use vecdb::{EagerVec, PcoVec, Rw, StorageMode};
use crate::internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, PercentCumulativeRolling, PercentPerBlock, RatioRollingWindows,
LazyPercentCumulativeRolling, PercentCumulativeRolling, RatioRollingWindows,
};
#[derive(Traversable)]
@@ -17,9 +17,7 @@ pub struct Vecs<M: StorageMode = Rw> {
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentCumulativeRolling<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance_rolling: LazyPercentRollingWindows<BasisPoints16>,
pub subsidy_dominance: LazyPercentCumulativeRolling<BasisPoints16>,
#[traversable(wrap = "fees", rename = "to_subsidy_ratio")]
pub fee_to_subsidy_ratio: RatioRollingWindows<BasisPoints32, M>,
}

View File

@@ -3,15 +3,11 @@ use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
use super::{Vecs, WithOutputTypes};
use crate::internal::{CoinbasePolicy, PerBlockCumulativeRolling, walk_blocks};
impl Vecs {
/// Phase 1: walk outputs and populate `output_count` + `tx_count`.
/// Independent of transactions, can run alongside other outputs work.
pub(crate) fn compute_counts(
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
@@ -22,83 +18,93 @@ impl Vecs {
+ indexer.vecs.transactions.first_txout_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.output_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
self.output_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
self.tx_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
let skip = self
.output_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
.min_stateful_len()
.min(self.tx_count.min_stateful_len());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
if skip < end {
self.output_count.truncate_if_needed_at(skip)?;
self.tx_count.truncate_if_needed_at(skip)?;
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txout_len = indexer.vecs.outputs.output_type.len();
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
walk_blocks(
&fi_batch,
txid_len,
CoinbasePolicy::Include,
|tx_pos, per_tx| {
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
fo_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txout_len
};
otype_cursor.advance(fo - otype_cursor.position());
for _ in fo..next_fo {
let otype = otype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
|agg| {
push_block(&mut self.output_count, agg.entries_all, &agg.entries_per_type);
push_block(&mut self.tx_count, agg.txs_all, &agg.txs_per_type);
if self.output_count.all.block.batch_limit_reached() {
let _lock = exit.lock();
self.output_count.write()?;
self.tx_count.write()?;
}
Ok(())
},
)?;
{
let _lock = exit.lock();
self.output_count.write()?;
self.tx_count.write()?;
}
self.output_count
.compute_rest(starting_indexes.height, exit)?;
self.tx_count
.compute_rest(starting_indexes.height, exit)?;
}
for (_, v) in self.output_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,
exit,
)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txout_len = indexer.vecs.outputs.output_type.len();
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
compute_by_addr_type_block_counts(
&mut self.output_count,
&mut self.tx_count,
&fi_batch,
txid_len,
false,
starting_indexes.height,
exit,
|tx_pos, per_tx| {
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
fo_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txout_len
};
otype_cursor.advance(fo - otype_cursor.position());
for _ in fo..next_fo {
let otype = otype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
)
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute` (depends on tx count totals).
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
exit,
)
Ok(())
}
}
/// Append one block's aggregated counts: `total` into the `all` vec and the
/// matching `per_type` slot into each typed vec.
#[inline]
fn push_block(
    metric: &mut WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
    total: u64,
    per_type: &[u64; 12],
) {
    metric.all.block.push(StoredU64::from(total));
    metric
        .by_type
        .iter_typed_mut()
        .for_each(|(otype, vec)| vec.block.push(StoredU64::from(per_type[otype as usize])));
}

View File

@@ -1,12 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::ByType;
use brk_error::Result;
use brk_types::Version;
use brk_types::{StoredU64, Version};
use vecdb::Database;
use super::Vecs;
use super::{Vecs, WithOutputTypes};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
},
};
impl Vecs {
@@ -16,33 +18,39 @@ impl Vecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let output_count = WithOutputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
"output_count_bis",
|t| format!("{t}_output_count"),
version,
indexes,
cached_starts,
)?;
let tx_count = WithOutputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
"tx_count_bis",
|t| format!("tx_count_with_{t}_output"),
version,
indexes,
cached_starts,
)?;
let tx_percent = ByType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_percent_with_{name}_output"),
version,
indexes,
)
})?;
Ok(Self {
output_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{name}_output_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out_rel_to_all"),
version,
indexes,
)
})?,
output_count,
tx_count,
tx_percent,
})
}
}

View File

@@ -1,5 +1,7 @@
mod compute;
mod import;
mod vecs;
mod with_output_types;
pub use vecs::Vecs;
pub(crate) use with_output_types::WithOutputTypes;

View File

@@ -1,16 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::ByType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use super::WithOutputTypes;
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total output count (granular).
pub output_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
pub output_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: ByType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,92 @@
//! Generic `all` + per-`OutputType` container (12 output types, including
//! op_return). Used by `outputs/by_type/`. Mirrors `WithAddrTypes` and
//! `WithInputTypes`.
use brk_cohort::ByType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlockCumulativeRolling, WindowStartVec, Windows},
};
/// `all` aggregate plus per-`OutputType` breakdown across all 12 output
/// types (spendable + op_return).
#[derive(Clone, Traversable)]
pub struct WithOutputTypes<T> {
    /// Aggregate across every output type.
    pub all: T,
    /// Per-`OutputType` breakdown, flattened to the same tree level as `all`.
    #[traversable(flatten)]
    pub by_type: ByType<T>,
}
impl<T, C> WithOutputTypes<PerBlockCumulativeRolling<T, C>>
where
    T: NumericValue + JsonSchema + Into<C>,
    C: NumericValue + JsonSchema,
{
    /// Open (or create) the `all` vec under `all_name` plus one vec per
    /// output type, each named through `per_type_name`.
    pub(crate) fn forced_import_with(
        db: &Database,
        all_name: &str,
        per_type_name: impl Fn(&str) -> String,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let import = |vec_name: &str| {
            PerBlockCumulativeRolling::forced_import(db, vec_name, version, indexes, cached_starts)
        };
        let all = import(all_name)?;
        let by_type = ByType::try_new(|_, type_name| import(&per_type_name(type_name)))?;
        Ok(Self { all, by_type })
    }

    /// Shortest stateful (per-block) length across `all` and every typed
    /// vec — the safe resume point after a partial write.
    pub(crate) fn min_stateful_len(&self) -> usize {
        let typed_min = self.by_type.iter().map(|v| v.block.len()).min().unwrap();
        typed_min.min(self.all.block.len())
    }

    /// Flush the stateful block vec of `all` and every typed vec.
    pub(crate) fn write(&mut self) -> Result<()> {
        self.all.block.write()?;
        for typed in self.by_type.iter_mut() {
            typed.block.write()?;
        }
        Ok(())
    }

    /// Validate every stateful block vec against `dep_version`, truncating
    /// back to `at_height` when stale.
    pub(crate) fn validate_and_truncate(
        &mut self,
        dep_version: Version,
        at_height: Height,
    ) -> Result<()> {
        self.all.block.validate_and_truncate(dep_version, at_height)?;
        for typed in self.by_type.iter_mut() {
            typed.block.validate_and_truncate(dep_version, at_height)?;
        }
        Ok(())
    }

    /// Truncate every stateful block vec down to `len` if it is longer.
    pub(crate) fn truncate_if_needed_at(&mut self, len: usize) -> Result<()> {
        self.all.block.truncate_if_needed_at(len)?;
        for typed in self.by_type.iter_mut() {
            typed.block.truncate_if_needed_at(len)?;
        }
        Ok(())
    }

    /// Derive the cumulative + rolling series for `all` and every typed vec
    /// from their stateful block values, starting at `max_from`.
    pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
        self.all.compute_rest(max_from, exit)?;
        for typed in self.by_type.iter_mut() {
            typed.compute_rest(max_from, exit)?;
        }
        Ok(())
    }
}

View File

@@ -4,7 +4,7 @@ use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
use crate::{blocks, indexes, inputs, scripts};
use crate::{blocks, indexes, inputs, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -13,19 +13,24 @@ impl Vecs {
indexer: &Indexer,
indexes: &indexes::Vecs,
inputs: &inputs::Vecs,
scripts: &scripts::Vecs,
blocks: &blocks::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
self.count.compute(
indexer,
indexes,
self.count
.compute(indexer, indexes, blocks, starting_indexes, exit)?;
self.per_sec
.compute(&self.count, starting_indexes, exit)?;
self.value
.compute(indexer, prices, starting_indexes, exit)?;
self.by_type.compute(indexer, starting_indexes, exit)?;
self.unspent.compute(
&self.count,
&inputs.count,
&scripts.count,
blocks,
&self.by_type,
starting_indexes,
exit,
)?;

View File

@@ -1,19 +1,16 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::{Height, Indexes, StoredU64};
use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
use crate::{blocks, indexes, inputs, scripts};
use crate::{blocks, indexes};
impl Vecs {
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
inputs_count: &inputs::CountVecs,
scripts_count: &scripts::CountVecs,
blocks: &blocks::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
@@ -28,41 +25,6 @@ impl Vecs {
exit,
0,
)?;
self.unspent.height.compute_transform3(
starting_indexes.height,
&self.total.cumulative.height,
&inputs_count.cumulative.height,
&scripts_count.op_return.cumulative.height,
|(h, output_count, input_count, op_return_count, ..)| {
let block_count = u64::from(h + 1_usize);
// -1 > genesis output is unspendable
let mut utxo_count =
*output_count - (*input_count - block_count) - *op_return_count - 1;
// txid dup: e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468
// Block 91_722 https://mempool.space/block/00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e
// Block 91_880 https://mempool.space/block/00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721
//
// txid dup: d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599
// Block 91_812 https://mempool.space/block/00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f
// Block 91_842 https://mempool.space/block/00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec
//
// Warning: Dups invalidate the previous coinbase according to
// https://chainquery.com/bitcoin-cli/gettxoutsetinfo
if h >= Height::new(91_842) {
utxo_count -= 1;
}
if h >= Height::new(91_880) {
utxo_count -= 1;
}
(h, StoredU64::from(utxo_count))
},
exit,
)?;
Ok(())
}
}

View File

@@ -5,7 +5,7 @@ use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, PerBlockAggregated, WindowStartVec, Windows},
internal::{PerBlockAggregated, WindowStartVec, Windows},
};
impl Vecs {
@@ -23,7 +23,6 @@ impl Vecs {
indexes,
cached_starts,
)?,
unspent: PerBlock::forced_import(db, "utxo_count_bis", version, indexes)?,
})
}
}

View File

@@ -2,10 +2,9 @@ use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, PerBlockAggregated};
use crate::internal::PerBlockAggregated;
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub total: PerBlockAggregated<StoredU64, M>,
pub unspent: PerBlock<StoredU64, M>,
}

View File

@@ -11,7 +11,7 @@ use crate::{
},
};
use super::{ByTypeVecs, CountVecs, SpentVecs, Vecs};
use super::{ByTypeVecs, CountVecs, PerSecVecs, SpentVecs, UnspentVecs, ValueVecs, Vecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -25,13 +25,19 @@ impl Vecs {
let spent = SpentVecs::forced_import(&db, version)?;
let count = CountVecs::forced_import(&db, version, indexes, cached_starts)?;
let per_sec = PerSecVecs::forced_import(&db, version, indexes)?;
let unspent = UnspentVecs::forced_import(&db, version, indexes)?;
let by_type = ByTypeVecs::forced_import(&db, version, indexes, cached_starts)?;
let value = ValueVecs::forced_import(&db, version, indexes)?;
let this = Self {
db,
spent,
count,
per_sec,
unspent,
by_type,
value,
};
finalize_db(&this.db, &this)?;
Ok(this)

View File

@@ -1,6 +1,9 @@
pub mod by_type;
pub mod count;
pub mod per_sec;
pub mod spent;
pub mod unspent;
pub mod value;
mod compute;
mod import;
@@ -10,7 +13,10 @@ use vecdb::{Database, Rw, StorageMode};
pub use by_type::Vecs as ByTypeVecs;
pub use count::Vecs as CountVecs;
pub use per_sec::Vecs as PerSecVecs;
pub use spent::Vecs as SpentVecs;
pub use unspent::Vecs as UnspentVecs;
pub use value::Vecs as ValueVecs;
pub const DB_NAME: &str = "outputs";
@@ -21,5 +27,8 @@ pub struct Vecs<M: StorageMode = Rw> {
pub spent: SpentVecs<M>,
pub count: CountVecs<M>,
pub per_sec: PerSecVecs<M>,
pub unspent: UnspentVecs<M>,
pub by_type: ByTypeVecs<M>,
pub value: ValueVecs<M>,
}

View File

@@ -0,0 +1,28 @@
use brk_error::Result;
use brk_types::{Indexes, StoredF32};
use vecdb::Exit;
use super::Vecs;
use crate::{internal::Windows, outputs::CountVecs};
impl Vecs {
    /// Derive outputs-per-second for every rolling window: each window's
    /// output-count sum divided by that window's length in seconds.
    pub(crate) fn compute(
        &mut self,
        count: &CountVecs,
        starting_indexes: &Indexes,
        exit: &Exit,
    ) -> Result<()> {
        let start = starting_indexes.height;
        let sums = count.total.rolling.sum.0.as_array();
        let targets = self.0.as_mut_array();
        // Walk target, source sum, and window length in lockstep.
        for ((target, sum_vec), &secs) in targets
            .iter_mut()
            .zip(sums.iter())
            .zip(Windows::<()>::SECS.iter())
        {
            target.height.compute_transform(
                start,
                &sum_vec.height,
                |(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
                exit,
            )?;
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,21 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, Windows},
};
impl Vecs {
    /// Open (or create) one `PerBlock` vec per rolling window, named
    /// `outputs_per_sec_<window>`.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
    ) -> Result<Self> {
        let windows = Windows::try_from_fn(|suffix| {
            let vec_name = format!("outputs_per_sec_{suffix}");
            PerBlock::forced_import(db, &vec_name, version, indexes)
        })?;
        Ok(Self(windows))
    }
}

View File

@@ -0,0 +1,8 @@
use brk_traversable::Traversable;
use brk_types::StoredF32;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, Windows};
/// Outputs-per-second: one `PerBlock<StoredF32>` per rolling window,
/// flattened so each window appears at the same tree level.
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw>(#[traversable(flatten)] pub Windows<PerBlock<StoredF32, M>>);

View File

@@ -0,0 +1,49 @@
use brk_error::Result;
use brk_types::{Height, Indexes, StoredU64};
use vecdb::Exit;
use super::Vecs;
use crate::{
inputs,
internal::PerBlockCumulativeRolling,
outputs::{ByTypeVecs, CountVecs},
};
impl Vecs {
    /// Compute the per-block UTXO count from the cumulative totals:
    /// `outputs - real spends - op_return outputs - genesis`, with the two
    /// BIP30 duplicate-txid corrections applied.
    pub(crate) fn compute(
        &mut self,
        count: &CountVecs,
        inputs_count: &inputs::CountVecs,
        by_type: &ByTypeVecs,
        starting_indexes: &Indexes,
        exit: &Exit,
    ) -> Result<()> {
        // Cumulative op_return output count — never spendable, so it must be
        // excluded from the UTXO set.
        let op_return: &PerBlockCumulativeRolling<StoredU64, StoredU64> =
            &by_type.output_count.by_type.unspendable.op_return;
        self.count.height.compute_transform3(
            starting_indexes.height,
            &count.total.cumulative.height,
            &inputs_count.cumulative.height,
            &op_return.cumulative.height,
            |(h, output_count, input_count, op_return_count, ..)| {
                // One block per height so far; subtracted from input_count —
                // presumably the single coinbase input per block, which
                // spends nothing. TODO(review): confirm.
                let block_count = u64::from(h + 1_usize);
                // -1 > genesis output is unspendable
                let mut utxo_count =
                    *output_count - (*input_count - block_count) - *op_return_count - 1;
                // BIP30 duplicate txid corrections:
                //
                // txid dup e3bf3d07… (blocks 91_722 / 91_880) and
                // txid dup d5d27987… (blocks 91_812 / 91_842).
                // Each duplicate overwrites the earlier coinbase output,
                // shrinking the UTXO set by one once the later block is in.
                if h >= Height::new(91_842) {
                    utxo_count -= 1;
                }
                if h >= Height::new(91_880) {
                    utxo_count -= 1;
                }
                (h, StoredU64::from(utxo_count))
            },
            exit,
        )?;
        Ok(())
    }
}

View File

@@ -0,0 +1,18 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{indexes, internal::PerBlock};
impl Vecs {
    /// Open (or create) the stored per-block UTXO-count vec
    /// (`utxo_count_bis`).
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
    ) -> Result<Self> {
        let count = PerBlock::forced_import(db, "utxo_count_bis", version, indexes)?;
        Ok(Self { count })
    }
}

View File

@@ -0,0 +1,11 @@
use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::PerBlock;
/// Unspent-output (UTXO set) metrics.
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
    /// UTXO count per block: `total - inputs - op_return - genesis`.
    pub count: PerBlock<StoredU64, M>,
}

View File

@@ -1,32 +0,0 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use crate::prices;
use super::Vecs;
impl Vecs {
    /// Top-level compute for this module: sync pending background work,
    /// compute counts then values, then schedule a deferred compaction in
    /// the background (holding the exit lock while it runs).
    pub(crate) fn compute(
        &mut self,
        indexer: &Indexer,
        prices: &prices::Vecs,
        starting_indexes: &Indexes,
        exit: &Exit,
    ) -> Result<()> {
        self.db.sync_bg_tasks()?;
        self.count.compute(indexer, starting_indexes, exit)?;
        self.value
            .compute(indexer, prices, starting_indexes, exit)?;
        // Clone so the background closure owns its own handle to `exit`.
        let exit = exit.clone();
        self.db.run_bg(move |db| {
            let _lock = exit.lock();
            db.compact_deferred_default()
        });
        Ok(())
    }
}

View File

@@ -1,147 +0,0 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
impl Vecs {
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.p2a.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2a.first_index,
&indexer.vecs.addrs.p2a.bytes,
exit,
)?)
})?;
self.p2ms.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.p2ms.first_index,
&indexer.vecs.scripts.p2ms.to_tx_index,
exit,
)?)
})?;
self.p2pk33.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pk33.first_index,
&indexer.vecs.addrs.p2pk33.bytes,
exit,
)?)
})?;
self.p2pk65.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pk65.first_index,
&indexer.vecs.addrs.p2pk65.bytes,
exit,
)?)
})?;
self.p2pkh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pkh.first_index,
&indexer.vecs.addrs.p2pkh.bytes,
exit,
)?)
})?;
self.p2sh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2sh.first_index,
&indexer.vecs.addrs.p2sh.bytes,
exit,
)?)
})?;
self.p2tr.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2tr.first_index,
&indexer.vecs.addrs.p2tr.bytes,
exit,
)?)
})?;
self.p2wpkh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2wpkh.first_index,
&indexer.vecs.addrs.p2wpkh.bytes,
exit,
)?)
})?;
self.p2wsh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2wsh.first_index,
&indexer.vecs.addrs.p2wsh.bytes,
exit,
)?)
})?;
// addr_output_count = sum of the 8 address-type per-block counts.
// Lives here (not in addr/) because every consumer that asks "what
// fraction of address outputs are X" needs it as the denominator.
self.addr_output_count.block.compute_sum_of_others(
starting_indexes.height,
&[
&self.p2pk65.block,
&self.p2pk33.block,
&self.p2pkh.block,
&self.p2sh.block,
&self.p2wpkh.block,
&self.p2wsh.block,
&self.p2tr.block,
&self.p2a.block,
],
exit,
)?;
self.addr_output_count
.compute_rest(starting_indexes.height, exit)?;
self.op_return.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.op_return.first_index,
&indexer.vecs.scripts.op_return.to_tx_index,
exit,
)?)
})?;
self.unknown_output
.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.unknown.first_index,
&indexer.vecs.scripts.unknown.to_tx_index,
exit,
)?)
})?;
self.empty_output
.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.empty.first_index,
&indexer.vecs.scripts.empty.to_tx_index,
exit,
)?)
})?;
Ok(())
}
}

View File

@@ -1,121 +0,0 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Imports (or creates) every per-script-type count vec from `db`.
    ///
    /// All thirteen vecs share the exact same construction — a
    /// `PerBlockCumulativeRolling<StoredU64, StoredU64>` differing only in
    /// its on-disk name — so the original thirteen copy-pasted call sites
    /// are funneled through a single local helper closure.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        // One vec per name; every parameter except the name is identical.
        let import = |name: &str| {
            PerBlockCumulativeRolling::forced_import(db, name, version, indexes, cached_starts)
        };
        Ok(Self {
            p2a: import("p2a_count")?,
            p2ms: import("p2ms_count")?,
            p2pk33: import("p2pk33_count")?,
            p2pk65: import("p2pk65_count")?,
            p2pkh: import("p2pkh_count")?,
            p2sh: import("p2sh_count")?,
            p2tr: import("p2tr_count")?,
            p2wpkh: import("p2wpkh_count")?,
            p2wsh: import("p2wsh_count")?,
            addr_output_count: import("addr_output_count")?,
            op_return: import("op_return_count")?,
            empty_output: import("empty_output_count")?,
            unknown_output: import("unknown_output_count")?,
        })
    }
}

View File

@@ -1,24 +0,0 @@
use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::PerBlockCumulativeRolling;
#[derive(Traversable)]
/// Per-block, per-script-type counts, each stored as a
/// `PerBlockCumulativeRolling` (per-block value plus cumulative/rolling
/// derivatives) over `StoredU64`.
pub struct Vecs<M: StorageMode = Rw> {
/// Pay-to-Anchor outputs.
pub p2a: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Bare multisig (P2MS) outputs.
pub p2ms: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-public-key, compressed (33-byte) keys.
pub p2pk33: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-public-key, uncompressed (65-byte) keys.
pub p2pk65: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-public-key-hash outputs.
pub p2pkh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-script-hash outputs.
pub p2sh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-taproot outputs.
pub p2tr: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-witness-public-key-hash outputs.
pub p2wpkh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Pay-to-witness-script-hash outputs.
pub p2wsh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Sum of the 8 address-type per-block counts. Useful as a denominator
/// for any "fraction of address outputs that …" metric.
pub addr_output_count: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// OP_RETURN (data-carrier) outputs.
pub op_return: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Outputs with an empty script.
pub empty_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Outputs whose script type is not recognized.
pub unknown_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
}

View File

@@ -1,31 +0,0 @@
use std::path::Path;
use brk_error::Result;
use brk_types::Version;
use crate::{
indexes,
internal::db_utils::{finalize_db, open_db},
};
use super::{CountVecs, ValueVecs, Vecs};
use crate::internal::{WindowStartVec, Windows};
impl Vecs {
    /// Opens (or creates) this module's database under `parent_path`, then
    /// force-imports the count and value sub-vecs and finalizes the db
    /// before handing the assembled container back.
    pub(crate) fn forced_import(
        parent_path: &Path,
        parent_version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let db = open_db(parent_path, super::DB_NAME, 1_000_000)?;
        // Build the sub-vecs first (they only borrow `db`), then move the
        // database into the container.
        let imported = Self {
            count: CountVecs::forced_import(&db, parent_version, indexes, cached_starts)?,
            value: ValueVecs::forced_import(&db, parent_version, indexes)?,
            db,
        };
        finalize_db(&imported.db, &imported)?;
        Ok(imported)
    }
}

View File

@@ -1,22 +0,0 @@
pub mod count;
pub mod value;
mod compute;
mod import;
use brk_traversable::Traversable;
use vecdb::{Database, Rw, StorageMode};
pub use count::Vecs as CountVecs;
pub use value::Vecs as ValueVecs;
pub const DB_NAME: &str = "scripts";
#[derive(Traversable)]
/// Container for script-related vecs: the backing database plus the count
/// and value sub-vec groups.
pub struct Vecs<M: StorageMode = Rw> {
/// Backing database handle; excluded from traversal.
#[traversable(skip)]
pub(crate) db: Database,
/// Per-script-type counts (see `count::Vecs`).
pub count: CountVecs<M>,
/// Per-script-type values (see `value::Vecs`).
pub value: ValueVecs<M>,
}

View File

@@ -3,12 +3,12 @@ use brk_types::{Indexes, Sats};
use vecdb::{Exit, VecIndex};
use super::Vecs;
use crate::{mining, prices, scripts};
use crate::{mining, outputs, prices};
impl Vecs {
pub(crate) fn compute(
&mut self,
scripts: &scripts::Vecs,
outputs: &outputs::Vecs,
mining: &mining::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
@@ -18,7 +18,7 @@ impl Vecs {
.compute_with(starting_indexes.height, prices, exit, |sats| {
Ok(sats.compute_transform2(
starting_indexes.height,
&scripts.value.op_return.block.sats,
&outputs.value.op_return.block.sats,
&mining.rewards.unclaimed.block.sats,
|(h, op_return, unclaimed, ..)| {
let genesis = if h.to_usize() == 0 {

View File

@@ -6,13 +6,13 @@ use vecdb::Exit;
const INITIAL_SUBSIDY: f64 = Sats::ONE_BTC_U64 as f64 * 50.0;
use super::Vecs;
use crate::{blocks, distribution, mining, prices, scripts, transactions};
use crate::{blocks, distribution, mining, outputs, prices, transactions};
impl Vecs {
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute(
&mut self,
scripts: &scripts::Vecs,
outputs: &outputs::Vecs,
blocks: &blocks::Vecs,
mining: &mining::Vecs,
transactions: &transactions::Vecs,
@@ -25,7 +25,7 @@ impl Vecs {
// 1. Compute burned/unspendable supply
self.burned
.compute(scripts, mining, prices, starting_indexes, exit)?;
.compute(outputs, mining, prices, starting_indexes, exit)?;
// 2. Compute inflation rate: (supply[h] / supply[1y_ago]) - 1
// Skip when lookback supply <= first block (50 BTC = 5B sats),

View File

@@ -3,8 +3,8 @@ use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use super::{Vecs, type_counts::compute_type_percents};
use crate::{blocks, indexes, inputs, outputs, prices};
use super::Vecs;
use crate::{blocks, indexes, inputs, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -14,14 +14,13 @@ impl Vecs {
indexes: &indexes::Vecs,
blocks: &blocks::Vecs,
inputs: &inputs::Vecs,
outputs: &outputs::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
let (r1, (r2, (r3, (r4, r5)))) = rayon::join(
let (r1, (r2, r3)) = rayon::join(
|| {
self.count
.compute(indexer, &blocks.lookback, starting_indexes, exit)
@@ -29,56 +28,13 @@ impl Vecs {
|| {
rayon::join(
|| self.versions.compute(indexer, starting_indexes, exit),
|| {
rayon::join(
|| self.size.compute(indexer, indexes, starting_indexes, exit),
|| {
rayon::join(
|| {
self.input_types
.compute(indexer, starting_indexes, exit)
},
|| {
self.output_types
.compute(indexer, starting_indexes, exit)
},
)
},
)
},
|| self.size.compute(indexer, indexes, starting_indexes, exit),
)
},
);
r1?;
r2?;
r3?;
r4?;
r5?;
let count_total = &self.count.total;
let (input_types, output_types) = (&mut self.input_types, &mut self.output_types);
let (r6, r7) = rayon::join(
|| {
compute_type_percents(
&input_types.by_type,
&mut input_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
|| {
compute_type_percents(
&output_types.by_type,
&mut output_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
);
r6?;
r7?;
self.fees.compute(
indexer,
@@ -95,8 +51,6 @@ impl Vecs {
prices,
&self.count,
&self.fees,
&inputs.count,
&outputs.count,
starting_indexes,
exit,
)?;

View File

@@ -12,9 +12,7 @@ use crate::{
},
};
use super::{
CountVecs, FeesVecs, InputTypesVecs, OutputTypesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs,
};
use super::{CountVecs, FeesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -32,8 +30,6 @@ impl Vecs {
let fees = FeesVecs::forced_import(&db, version, indexes)?;
let versions = VersionsVecs::forced_import(&db, version, indexes, cached_starts)?;
let volume = VolumeVecs::forced_import(&db, version, indexes, cached_starts)?;
let input_types = InputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let output_types = OutputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self {
db,
@@ -42,8 +38,6 @@ impl Vecs {
fees,
versions,
volume,
input_types,
output_types,
};
finalize_db(&this.db, &this)?;
Ok(this)

View File

@@ -1,68 +0,0 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
/// Counts, per block, how many transactions spend at least one input of
/// each address type, then derives the cumulative/rolling views via
/// `compute_type_counts`.
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
// Combined version of every indexer vec this computation reads; used to
// invalidate/truncate previously computed data when any dependency changed.
let dep_version = indexer.vecs.inputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txin_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.by_type.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
// Resume from the shortest per-type vec so all types stay aligned.
let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
// Force every per-type vec back to the common resume point.
for (_, v) in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
// First tx index of each remaining block.
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txin_len = indexer.vecs.inputs.output_type.len();
// Sequential cursors over per-input output types and per-tx first-input
// indexes; the scan closure below visits tx positions in increasing order,
// which the `advance`/`next` pattern relies on.
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
compute_type_counts(
&mut self.by_type,
&fi_batch,
txid_len,
// `skip_first_tx = true`: skip each block's first transaction —
// presumably the coinbase, which has no previous outputs to
// classify; confirm against compute_type_counts' callers.
true,
starting_indexes.height,
exit,
|tx_pos| {
// Input range [fi_in, next_fi_in) of the tx at `tx_pos`; the last
// tx's range is closed by the total input count.
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txin_len
};
// Bitset of distinct output types seen among this tx's inputs
// (one bit per type discriminant).
let mut seen: u16 = 0;
itype_cursor.advance(fi_in - itype_cursor.position());
for _ in fi_in..next_fi_in {
seen |= 1u16 << (itype_cursor.next().unwrap() as u8);
}
Ok(seen)
},
)
}
}

View File

@@ -1,39 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Creates (or reopens) the per-address-type "tx count with <type> input"
    /// vecs together with their companion percent-of-all-transactions vecs.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        // Raw per-block counters, one per address type.
        let by_type = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_in");
            PerBlockCumulativeRolling::forced_import(db, &vec_name, version, indexes, cached_starts)
        })?;
        // Matching ratio vecs, relative to the total tx count.
        let percent = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_in_rel_to_all");
            PercentCumulativeRolling::forced_import(db, &vec_name, version, indexes)
        })?;
        Ok(Self { by_type, percent })
    }
}

View File

@@ -1,12 +0,0 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
/// Per-address-type transaction counts keyed by input type, plus the same
/// counts expressed as basis points of all transactions.
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block count of transactions with at least one input of each type.
pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// `by_type` relative to the total transaction count, in basis points.
pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,12 +1,9 @@
pub mod count;
pub mod fees;
pub mod input_types;
pub mod output_types;
pub mod size;
pub mod versions;
pub mod volume;
mod type_counts;
mod compute;
mod import;
@@ -15,8 +12,6 @@ use vecdb::{Database, Rw, StorageMode};
pub use count::Vecs as CountVecs;
pub use fees::Vecs as FeesVecs;
pub use input_types::Vecs as InputTypesVecs;
pub use output_types::Vecs as OutputTypesVecs;
pub use size::Vecs as SizeVecs;
pub use versions::Vecs as VersionsVecs;
pub use volume::Vecs as VolumeVecs;
@@ -33,6 +28,4 @@ pub struct Vecs<M: StorageMode = Rw> {
pub fees: FeesVecs<M>,
pub versions: VersionsVecs<M>,
pub volume: VolumeVecs<M>,
pub input_types: InputTypesVecs<M>,
pub output_types: OutputTypesVecs<M>,
}

View File

@@ -1,68 +0,0 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
/// Counts, per block, how many transactions create at least one output of
/// each address type, then derives the cumulative/rolling views via
/// `compute_type_counts`.
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
// Combined version of every indexer vec this computation reads; used to
// invalidate/truncate previously computed data when any dependency changed.
let dep_version = indexer.vecs.outputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txout_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.by_type.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
// Resume from the shortest per-type vec so all types stay aligned.
let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
// Force every per-type vec back to the common resume point.
for (_, v) in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
// First tx index of each remaining block.
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txout_len = indexer.vecs.outputs.output_type.len();
// Sequential cursors over per-output types and per-tx first-output
// indexes; the scan closure below visits tx positions in increasing order,
// which the `advance`/`next` pattern relies on.
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
compute_type_counts(
&mut self.by_type,
&fi_batch,
txid_len,
// `skip_first_tx = false`: unlike the input-type scan, every tx is
// included here — a block's first transaction has real outputs.
false,
starting_indexes.height,
exit,
|tx_pos| {
// Output range [fo, next_fo) of the tx at `tx_pos`; the last tx's
// range is closed by the total output count.
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
fo_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txout_len
};
// Bitset of distinct output types created by this tx (one bit per
// type discriminant).
let mut seen: u16 = 0;
otype_cursor.advance(fo - otype_cursor.position());
for _ in fo..next_fo {
seen |= 1u16 << (otype_cursor.next().unwrap() as u8);
}
Ok(seen)
},
)
}
}

View File

@@ -1,39 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Creates (or reopens) the per-address-type "tx count with <type> output"
    /// vecs together with their companion percent-of-all-transactions vecs.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        // Raw per-block counters, one per address type.
        let by_type = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_out");
            PerBlockCumulativeRolling::forced_import(db, &vec_name, version, indexes, cached_starts)
        })?;
        // Matching ratio vecs, relative to the total tx count.
        let percent = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_out_rel_to_all");
            PercentCumulativeRolling::forced_import(db, &vec_name, version, indexes)
        })?;
        Ok(Self { by_type, percent })
    }
}

View File

@@ -1,12 +0,0 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
/// Per-address-type transaction counts keyed by output type, plus the same
/// counts expressed as basis points of all transactions.
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block count of transactions with at least one output of each type.
pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// `by_type` relative to the total transaction count, in basis points.
pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,91 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
/// Shared scan driver for the input-/output-type transaction counts.
///
/// For each block (one entry of `fi_batch`, which holds every block's first
/// tx index), calls `scan_tx` on each transaction to obtain a bitset of the
/// distinct output types it touches, tallies one count per type per block,
/// and pushes the per-block totals into `by_type` — flushing batches under
/// the `exit` lock. Finally derives the cumulative/rolling views from
/// `starting_height`.
///
/// `skip_first_tx` skips the first transaction of every block; `txid_len`
/// closes the last block's tx range.
pub(super) fn compute_type_counts(
by_type: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
fi_batch: &[TxIndex],
txid_len: usize,
skip_first_tx: bool,
starting_height: Height,
exit: &Exit,
mut scan_tx: impl FnMut(usize) -> Result<u16>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
// Tx range of block j is [fi, next_fi); the last block is closed by
// the total tx count.
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = if skip_first_tx { fi + 1 } else { fi };
// One tally slot per output-type discriminant.
// NOTE(review): `seen` below is a 16-bit set while this array has 12
// slots — assumes every type discriminant is < 12; confirm against
// the OutputType definition.
let mut counts = [0u64; 12];
for tx_pos in start_tx..next_fi {
let seen = scan_tx(tx_pos)?;
// Walk the set bits: each one is a distinct type this tx touches.
let mut bits = seen;
while bits != 0 {
let idx = bits.trailing_zeros() as usize;
counts[idx] += 1;
// Clear the lowest set bit.
bits &= bits - 1;
}
}
for otype in OutputType::ADDR_TYPES {
by_type
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(counts[otype as usize]));
}
// All per-type vecs are pushed in lockstep, so checking a single one
// (p2pkh) decides when the whole batch gets flushed.
if by_type.p2pkh.block.batch_limit_reached() {
let _lock = exit.lock();
for (_, v) in by_type.iter_mut() {
v.block.write()?;
}
}
}
// Flush whatever remains after the final block.
{
let _lock = exit.lock();
for (_, v) in by_type.iter_mut() {
v.block.write()?;
}
}
// Derive cumulative/rolling data from the freshly written per-block vecs.
for (_, v) in by_type.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
Ok(())
}
/// For each address type, derives the share (in basis points) of all
/// transactions that touch that type: per-type counts over `count_total`,
/// across both the cumulative and the rolling-window views.
pub(super) fn compute_type_percents(
by_type: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
count_total: &PerBlockFull<StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
for otype in OutputType::ADDR_TYPES {
let source = by_type.get_unwrap(otype);
percent
.get_mut_unwrap(otype)
// Numerator: this type's cumulative + per-window rolling sums;
// denominator: the all-tx equivalents, combined as a u64 → bp16
// ratio (`RatioU64Bp16`).
.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&source.cumulative.height,
&count_total.cumulative.height,
source.sum.as_array().map(|w| &w.height),
count_total.rolling.sum.as_array().map(|w| &w.height),
exit,
)?;
}
Ok(())
}

View File

@@ -5,9 +5,7 @@ use vecdb::Exit;
use super::Vecs;
use crate::transactions::{count, fees};
use crate::{indexes, inputs, outputs, prices};
const WINDOW_SECS: [f64; 4] = [86400.0, 7.0 * 86400.0, 30.0 * 86400.0, 365.0 * 86400.0];
use crate::{indexes, internal::Windows, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -18,8 +16,6 @@ impl Vecs {
prices: &prices::Vecs,
count_vecs: &count::Vecs,
fees_vecs: &fees::Vecs,
inputs_count: &inputs::CountVecs,
outputs_count: &outputs::CountVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
@@ -37,32 +33,14 @@ impl Vecs {
let h = starting_indexes.height;
let tx_sums = count_vecs.total.rolling.sum.0.as_array();
let input_sums = inputs_count.rolling.sum.0.as_array();
let output_sums = outputs_count.total.rolling.sum.0.as_array();
for (i, &secs) in WINDOW_SECS.iter().enumerate() {
self.tx_per_sec.as_mut_array()[i].height.compute_transform(
let tx_per_sec = self.tx_per_sec.as_mut_array();
for (i, &secs) in Windows::<()>::SECS.iter().enumerate() {
tx_per_sec[i].height.compute_transform(
h,
&tx_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
self.inputs_per_sec.as_mut_array()[i]
.height
.compute_transform(
h,
&input_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
self.outputs_per_sec.as_mut_array()[i]
.height
.compute_transform(
h,
&output_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
}
Ok(())

View File

@@ -27,12 +27,6 @@ impl Vecs {
tx_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("tx_per_sec_{suffix}"), v, indexes)
})?,
outputs_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("outputs_per_sec_{suffix}"), v, indexes)
})?,
inputs_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("inputs_per_sec_{suffix}"), v, indexes)
})?,
})
}
}

View File

@@ -8,6 +8,4 @@ use crate::internal::{AmountPerBlockCumulativeRolling, PerBlock, Windows};
pub struct Vecs<M: StorageMode = Rw> {
pub transfer_volume: AmountPerBlockCumulativeRolling<M>,
pub tx_per_sec: Windows<PerBlock<StoredF32, M>>,
pub outputs_per_sec: Windows<PerBlock<StoredF32, M>>,
pub inputs_per_sec: Windows<PerBlock<StoredF32, M>>,
}