global: big snapshot part 2

This commit is contained in:
nym21
2026-04-13 22:47:08 +02:00
parent 765261648d
commit 283baca848
93 changed files with 3242 additions and 3067 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +1,34 @@
use std::ops::{Add, AddAssign};
use brk_traversable::Traversable;
use brk_types::OutputType;
use rayon::prelude::*;
use super::{SpendableType, UnspendableType};
use super::{Filter, SpendableType, UnspendableType};
#[derive(Default, Clone, Debug)]
pub const OP_RETURN: &str = "op_return";
#[derive(Default, Clone, Debug, Traversable)]
pub struct ByType<T> {
#[traversable(flatten)]
pub spendable: SpendableType<T>,
#[traversable(flatten)]
pub unspendable: UnspendableType<T>,
}
impl<T> ByType<T> {
/// Build a `ByType` by invoking `create` once per output type.
///
/// `create` receives the type's `Filter` and its canonical short name
/// (e.g. [`OP_RETURN`] for the unspendable variant) and may fail; the
/// first error aborts construction.
pub fn try_new<F, E>(mut create: F) -> Result<Self, E>
where
F: FnMut(Filter, &'static str) -> Result<T, E>,
{
Ok(Self {
// Spendable variants are delegated to `SpendableType`'s own builder.
spendable: SpendableType::try_new(&mut create)?,
unspendable: UnspendableType {
op_return: create(Filter::Type(OutputType::OpReturn), OP_RETURN)?,
},
})
}
pub fn get(&self, output_type: OutputType) -> &T {
match output_type {
OutputType::P2PK65 => &self.spendable.p2pk65,
@@ -44,6 +62,49 @@ impl<T> ByType<T> {
OutputType::OpReturn => &mut self.unspendable.op_return,
}
}
/// Iterate every variant by reference: the spendable types first, then
/// the single unspendable `op_return` slot.
pub fn iter(&self) -> impl Iterator<Item = &T> {
self.spendable
.iter()
.chain(std::iter::once(&self.unspendable.op_return))
}
/// Mutable counterpart of [`Self::iter`]: spendable types first, then
/// `op_return`.
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> {
self.spendable
.iter_mut()
.chain(std::iter::once(&mut self.unspendable.op_return))
}
pub fn par_iter_mut(&mut self) -> impl ParallelIterator<Item = &mut T>
where
T: Send + Sync,
{
let Self {
spendable,
unspendable,
} = self;
spendable
.par_iter_mut()
.chain([&mut unspendable.op_return].into_par_iter())
}
/// Like [`Self::iter`], but pairs each value with its `OutputType` tag.
pub fn iter_typed(&self) -> impl Iterator<Item = (OutputType, &T)> {
self.spendable
.iter_typed()
.chain(std::iter::once((
OutputType::OpReturn,
&self.unspendable.op_return,
)))
}
/// Mutable counterpart of [`Self::iter_typed`].
pub fn iter_typed_mut(&mut self) -> impl Iterator<Item = (OutputType, &mut T)> {
self.spendable
.iter_typed_mut()
.chain(std::iter::once((
OutputType::OpReturn,
&mut self.unspendable.op_return,
)))
}
}
impl<T> Add for ByType<T>

View File

@@ -4,10 +4,10 @@ use derive_more::{Deref, DerefMut};
use crate::{
indexes,
internal::{LazyRollingDeltasFromHeight, WindowStartVec, Windows},
internal::{LazyRollingDeltasFromHeight, WindowStartVec, Windows, WithAddrTypes},
};
use super::{AddrCountsVecs, WithAddrTypes};
use super::AddrCountsVecs;
type AddrDelta = LazyRollingDeltasFromHeight<StoredU64, StoredI64, BasisPointsSigned32>;

View File

@@ -5,9 +5,8 @@ use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{PerBlock, WithAddrTypes},
};
/// Exposed address count (`all` + per-type) for a single variant (funded or total).

View File

@@ -45,7 +45,7 @@ use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::indexes;
use crate::{indexes, prices};
/// Top-level container for all exposed address tracking: counts (funded +
/// total) plus the funded supply.
@@ -87,9 +87,15 @@ impl ExposedAddrVecs {
Ok(())
}
pub(crate) fn compute_rest(&mut self, starting_indexes: &Indexes, exit: &Exit) -> Result<()> {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
prices: &prices::Vecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.supply.compute_rest(starting_indexes, exit)?;
self.supply
.compute_rest(starting_indexes.height, prices, exit)?;
Ok(())
}
}

View File

@@ -1,9 +1,9 @@
use brk_cohort::ByAddrType;
use brk_types::{Height, Sats};
use brk_types::Height;
use derive_more::{Deref, DerefMut};
use vecdb::ReadableVec;
use crate::internal::PerBlock;
use crate::internal::AmountPerBlock;
use super::vecs::ExposedAddrSupplyVecs;
@@ -23,8 +23,8 @@ impl From<(&ExposedAddrSupplyVecs, Height)> for AddrTypeToExposedAddrSupply {
#[inline]
fn from((vecs, starting_height): (&ExposedAddrSupplyVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
let read = |v: &PerBlock<Sats>| -> u64 {
u64::from(v.height.collect_one(prev_height).unwrap())
let read = |v: &AmountPerBlock| -> u64 {
u64::from(v.sats.height.collect_one(prev_height).unwrap())
};
Self(ByAddrType {
p2pk65: read(&vecs.by_addr_type.p2pk65),

View File

@@ -1,20 +1,21 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Sats, Version};
use brk_types::Version;
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{AmountPerBlock, WithAddrTypes},
};
/// Exposed address supply (sats) — `all` + per-address-type. Tracks the total
/// balance held by addresses currently in the funded exposed set.
/// Exposed address supply (sats/btc/cents/usd) — `all` + per-address-type.
/// Tracks the total balance held by addresses currently in the funded
/// exposed set. Sats are pushed stateful per block; cents/usd are derived
/// post-hoc from sats × spot price.
#[derive(Deref, DerefMut, Traversable)]
pub struct ExposedAddrSupplyVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<Sats, M>>,
#[traversable(flatten)] pub WithAddrTypes<AmountPerBlock<M>>,
);
impl ExposedAddrSupplyVecs {
@@ -23,7 +24,7 @@ impl ExposedAddrSupplyVecs {
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(WithAddrTypes::<PerBlock<Sats>>::forced_import(
Ok(Self(WithAddrTypes::<AmountPerBlock>::forced_import(
db,
"exposed_addr_supply",
version,

View File

@@ -8,7 +8,6 @@ mod new_addr_count;
mod reused;
mod total_addr_count;
mod type_map;
mod with_addr_types;
pub use activity::{AddrActivityVecs, AddrTypeToActivityCounts};
pub use addr_count::{AddrCountsVecs, AddrTypeToAddrCount};
@@ -24,4 +23,3 @@ pub use reused::{
};
pub use total_addr_count::TotalAddrCountVecs;
pub use type_map::{AddrTypeToTypeIndexMap, AddrTypeToVec, HeightToAddrTypeToVec};
pub use with_addr_types::WithAddrTypes;

View File

@@ -6,10 +6,10 @@ use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows},
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows, WithAddrTypes},
};
use super::{TotalAddrCountVecs, WithAddrTypes};
use super::TotalAddrCountVecs;
/// New address count per block (global + per-type).
#[derive(Deref, DerefMut, Traversable)]

View File

@@ -5,9 +5,8 @@ use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
internal::{PerBlock, WithAddrTypes},
};
/// Reused address count (`all` + per-type) for a single variant (funded or total).

View File

@@ -10,7 +10,7 @@
//! an aggregated `all`.
//! - [`uses`] — per-block count of outputs going to addresses that were
//! already reused, plus the derived percent over total address-output
//! count (denominator from `scripts::count`).
//! count (denominator from `outputs::by_type`).
mod count;
mod uses;
@@ -27,7 +27,7 @@ use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{WindowStartVec, Windows},
scripts,
outputs,
};
/// Top-level container for all reused address tracking: counts (funded +
@@ -74,12 +74,12 @@ impl ReusedAddrVecs {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
outputs_by_type: &outputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.uses
.compute_rest(starting_indexes, scripts_count, exit)?;
.compute_rest(starting_indexes, outputs_by_type, exit)?;
Ok(())
}
}

View File

@@ -1,17 +1,17 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, Version};
use brk_types::{BasisPoints16, Indexes, OutputType, StoredU64, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, RatioU64Bp16, WindowStartVec, Windows,
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
WithAddrTypes,
},
scripts,
outputs,
};
use super::state::AddrTypeToReusedAddrUseCount;
@@ -22,8 +22,9 @@ use super::state::AddrTypeToReusedAddrUseCount;
/// that were *already* reused at the moment of the use, so the use that
/// makes an address reused is not itself counted.
///
/// The denominator for the percent (total address-output count) lives in
/// `scripts::count` and is reused here rather than duplicated.
/// The denominator for the percent (per-type and aggregate address-output
/// counts) is read from `outputs::ByTypeVecs::output_count` rather than
/// duplicated here.
#[derive(Traversable)]
pub struct ReusedAddrUsesVecs<M: StorageMode = Rw> {
pub reused_addr_use_count:
@@ -87,26 +88,25 @@ impl ReusedAddrUsesVecs {
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
outputs_by_type: &outputs::ByTypeVecs,
exit: &Exit,
) -> Result<()> {
self.reused_addr_use_count
.compute_rest(starting_indexes.height, exit)?;
compute_one_percent(
&mut self.reused_addr_use_percent.all,
self.reused_addr_use_percent.all.compute_count_ratio(
&self.reused_addr_use_count.all,
&scripts_count.addr_output_count,
&outputs_by_type.output_count.all,
starting_indexes.height,
exit,
)?;
for otype in OutputType::ADDR_TYPES {
compute_one_percent(
self.reused_addr_use_percent
.by_addr_type
.get_mut_unwrap(otype),
.get_mut_unwrap(otype)
.compute_count_ratio(
self.reused_addr_use_count.by_addr_type.get_unwrap(otype),
denom_for_type(scripts_count, otype),
outputs_by_type.output_count.by_type.get(otype),
starting_indexes.height,
exit,
)?;
@@ -114,39 +114,3 @@ impl ReusedAddrUsesVecs {
Ok(())
}
}
#[inline]
fn compute_one_percent(
percent: &mut PercentCumulativeRolling<BasisPoints16>,
reused: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
denom: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
percent.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&reused.cumulative.height,
&denom.cumulative.height,
reused.sum.as_array().map(|w| &w.height),
denom.sum.as_array().map(|w| &w.height),
exit,
)
}
#[inline]
fn denom_for_type(
scripts_count: &scripts::CountVecs,
otype: OutputType,
) -> &PerBlockCumulativeRolling<StoredU64, StoredU64> {
match otype {
OutputType::P2PK33 => &scripts_count.p2pk33,
OutputType::P2PK65 => &scripts_count.p2pk65,
OutputType::P2PKH => &scripts_count.p2pkh,
OutputType::P2SH => &scripts_count.p2sh,
OutputType::P2WPKH => &scripts_count.p2wpkh,
OutputType::P2WSH => &scripts_count.p2wsh,
OutputType::P2TR => &scripts_count.p2tr,
OutputType::P2A => &scripts_count.p2a,
_ => unreachable!("OutputType::ADDR_TYPES contains only address types"),
}
}

View File

@@ -4,9 +4,12 @@ use brk_types::{Height, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{indexes, internal::PerBlock};
use crate::{
indexes,
internal::{PerBlock, WithAddrTypes},
};
use super::{AddrCountsVecs, WithAddrTypes};
use super::AddrCountsVecs;
/// Total address count (global + per-type) with all derived indexes.
#[derive(Deref, DerefMut, Traversable)]

View File

@@ -27,7 +27,7 @@ use crate::{
PerBlockCumulativeRolling, WindowStartVec, Windows,
db_utils::{finalize_db, open_db},
},
outputs, prices, scripts, transactions,
outputs, prices, transactions,
};
use super::{
@@ -235,7 +235,6 @@ impl Vecs {
indexes: &indexes::Vecs,
inputs: &inputs::Vecs,
outputs: &outputs::Vecs,
scripts: &scripts::Vecs,
transactions: &transactions::Vecs,
blocks: &blocks::Vecs,
prices: &prices::Vecs,
@@ -473,8 +472,10 @@ impl Vecs {
self.addrs.empty.compute_rest(starting_indexes, exit)?;
self.addrs
.reused
.compute_rest(starting_indexes, &scripts.count, exit)?;
self.addrs.exposed.compute_rest(starting_indexes, exit)?;
.compute_rest(starting_indexes, &outputs.by_type, exit)?;
self.addrs
.exposed
.compute_rest(starting_indexes, prices, exit)?;
// 6c. Compute total_addr_count = addr_count + empty_addr_count
self.addrs.total.compute(

View File

@@ -3,15 +3,11 @@ use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
use super::{Vecs, WithInputTypes};
use crate::internal::{CoinbasePolicy, PerBlockCumulativeRolling, walk_blocks};
impl Vecs {
/// Phase 1: walk inputs and populate `input_count` + `tx_count`.
/// Independent of transactions, can run alongside other inputs work.
pub(crate) fn compute_counts(
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
@@ -22,35 +18,21 @@ impl Vecs {
+ indexer.vecs.transactions.first_txin_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.input_count.iter_mut() {
v.block
self.input_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
self.tx_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self
.input_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
.min_stateful_len()
.min(self.tx_count.min_stateful_len());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.input_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
if skip < end {
self.input_count.truncate_if_needed_at(skip)?;
self.tx_count.truncate_if_needed_at(skip)?;
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
@@ -59,14 +41,10 @@ impl Vecs {
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
compute_by_addr_type_block_counts(
&mut self.input_count,
&mut self.tx_count,
walk_blocks(
&fi_batch,
txid_len,
true, // skip coinbase (1 fake input)
starting_indexes.height,
exit,
CoinbasePolicy::Skip,
|tx_pos, per_tx| {
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
@@ -82,23 +60,51 @@ impl Vecs {
}
Ok(())
},
)
|agg| {
push_block(&mut self.input_count, agg.entries_all, &agg.entries_per_type);
push_block(&mut self.tx_count, agg.txs_all, &agg.txs_per_type);
if self.input_count.all.block.batch_limit_reached() {
let _lock = exit.lock();
self.input_count.write()?;
self.tx_count.write()?;
}
Ok(())
},
)?;
{
let _lock = exit.lock();
self.input_count.write()?;
self.tx_count.write()?;
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute`.
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
self.input_count
.compute_rest(starting_indexes.height, exit)?;
self.tx_count
.compute_rest(starting_indexes.height, exit)?;
}
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,
exit,
)
)?;
}
Ok(())
}
}
#[inline]
fn push_block(
metric: &mut WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
total: u64,
per_type: &[u64; 12],
) {
metric.all.block.push(StoredU64::from(total));
for (otype, vec) in metric.by_type.iter_typed_mut() {
vec.block.push(StoredU64::from(per_type[otype as usize]));
}
}

View File

@@ -1,12 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::SpendableType;
use brk_error::Result;
use brk_types::Version;
use brk_types::{StoredU64, Version};
use vecdb::Database;
use super::Vecs;
use super::{Vecs, WithInputTypes};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
},
};
impl Vecs {
@@ -16,33 +18,39 @@ impl Vecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
input_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
let input_count = WithInputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
&format!("{name}_input_count"),
"input_count_bis",
|t| format!("{t}_prevout_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
)?;
let tx_count = WithInputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
&format!("tx_count_with_{name}_in"),
"non_coinbase_tx_count",
|t| format!("tx_count_with_{t}_prevout"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
)?;
let tx_percent = SpendableType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in_rel_to_all"),
&format!("tx_percent_with_{name}_prevout"),
version,
indexes,
)
})?,
})?;
Ok(Self {
input_count,
tx_count,
tx_percent,
})
}
}

View File

@@ -1,5 +1,7 @@
mod compute;
mod import;
mod vecs;
mod with_input_types;
pub use vecs::Vecs;
pub(crate) use with_input_types::WithInputTypes;

View File

@@ -1,18 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::SpendableType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use super::WithInputTypes;
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total input count (granular). The "type" is the
/// type of the spent output that the input consumes.
pub input_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one input that
/// spends an output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
pub input_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_count: WithInputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: SpendableType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,93 @@
//! Generic `all` + per-input-type container (11 spendable types — no
//! op_return since op_return outputs are non-spendable). Used by
//! `inputs/by_type/`. Mirrors `WithAddrTypes` and `WithOutputTypes`.
use brk_cohort::SpendableType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlockCumulativeRolling, WindowStartVec, Windows},
};
/// `all` aggregate plus per-input-type breakdown across the 11 spendable
/// output types (everything except op_return). The "type" of an input is
/// the type of the previous output it spends.
#[derive(Clone, Traversable)]
pub struct WithInputTypes<T> {
/// Aggregate over every input type.
pub all: T,
/// One `T` per spendable output type, flattened into the traversal tree.
#[traversable(flatten)]
pub by_type: SpendableType<T>,
}
impl<T, C> WithInputTypes<PerBlockCumulativeRolling<T, C>>
where
    T: NumericValue + JsonSchema + Into<C>,
    C: NumericValue + JsonSchema,
{
    /// Open (or create) the `all` vec plus one vec per spendable type.
    ///
    /// `all_name` names the aggregate vec on disk; `per_type_name` maps a
    /// type's short name to the on-disk name of that type's vec.
    pub(crate) fn forced_import_with(
        db: &Database,
        all_name: &str,
        per_type_name: impl Fn(&str) -> String,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let open = |name: &str| {
            PerBlockCumulativeRolling::forced_import(db, name, version, indexes, cached_starts)
        };
        let all = open(all_name)?;
        let by_type = SpendableType::try_new(|_, short| open(&per_type_name(short)))?;
        Ok(Self { all, by_type })
    }

    /// Shortest stateful (`block`) length across `all` and every per-type vec.
    pub(crate) fn min_stateful_len(&self) -> usize {
        std::iter::once(self.all.block.len())
            .chain(self.by_type.iter().map(|v| v.block.len()))
            .min()
            .unwrap()
    }

    /// Flush the `block` vec of `all` and of every per-type entry.
    pub(crate) fn write(&mut self) -> Result<()> {
        for vec in std::iter::once(&mut self.all).chain(self.by_type.iter_mut()) {
            vec.block.write()?;
        }
        Ok(())
    }

    /// Validate stored versions against `dep_version` and truncate every
    /// `block` vec back to `at_height` where they disagree.
    pub(crate) fn validate_and_truncate(
        &mut self,
        dep_version: Version,
        at_height: Height,
    ) -> Result<()> {
        for vec in std::iter::once(&mut self.all).chain(self.by_type.iter_mut()) {
            vec.block.validate_and_truncate(dep_version, at_height)?;
        }
        Ok(())
    }

    /// Truncate every `block` vec down to `len` entries when longer.
    pub(crate) fn truncate_if_needed_at(&mut self, len: usize) -> Result<()> {
        for vec in std::iter::once(&mut self.all).chain(self.by_type.iter_mut()) {
            vec.block.truncate_if_needed_at(len)?;
        }
        Ok(())
    }

    /// Derive the cumulative / rolling views of every vec from `max_from` on.
    pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
        for vec in std::iter::once(&mut self.all).chain(self.by_type.iter_mut()) {
            vec.compute_rest(max_from, exit)?;
        }
        Ok(())
    }
}

View File

@@ -20,6 +20,9 @@ impl Vecs {
self.spent.compute(indexer, starting_indexes, exit)?;
self.count
.compute(indexer, indexes, blocks, starting_indexes, exit)?;
self.per_sec
.compute(&self.count, starting_indexes, exit)?;
self.by_type.compute(indexer, starting_indexes, exit)?;
let exit = exit.clone();
self.db.run_bg(move |db| {

View File

@@ -11,7 +11,7 @@ use crate::{
},
};
use super::{CountVecs, SpentVecs, Vecs};
use super::{ByTypeVecs, CountVecs, PerSecVecs, SpentVecs, Vecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -25,8 +25,16 @@ impl Vecs {
let spent = SpentVecs::forced_import(&db, version)?;
let count = CountVecs::forced_import(&db, version, indexes, cached_starts)?;
let per_sec = PerSecVecs::forced_import(&db, version, indexes)?;
let by_type = ByTypeVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self { db, spent, count };
let this = Self {
db,
spent,
count,
per_sec,
by_type,
};
finalize_db(&this.db, &this)?;
Ok(this)
}

View File

@@ -1,4 +1,6 @@
pub mod by_type;
pub mod count;
pub mod per_sec;
pub mod spent;
mod compute;
@@ -7,7 +9,9 @@ mod import;
use brk_traversable::Traversable;
use vecdb::{Database, Rw, StorageMode};
pub use by_type::Vecs as ByTypeVecs;
pub use count::Vecs as CountVecs;
pub use per_sec::Vecs as PerSecVecs;
pub use spent::Vecs as SpentVecs;
pub const DB_NAME: &str = "inputs";
@@ -19,4 +23,6 @@ pub struct Vecs<M: StorageMode = Rw> {
pub spent: SpentVecs<M>,
pub count: CountVecs<M>,
pub per_sec: PerSecVecs<M>,
pub by_type: ByTypeVecs<M>,
}

View File

@@ -0,0 +1,28 @@
use brk_error::Result;
use brk_types::{Indexes, StoredF32};
use vecdb::Exit;
use super::Vecs;
use crate::{inputs::CountVecs, internal::Windows};
impl Vecs {
/// Derive the inputs-per-second rate for each rolling window as
/// `window_sum / window_seconds`, reading the window sums from
/// `count.rolling.sum`.
pub(crate) fn compute(
&mut self,
count: &CountVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let h = starting_indexes.height;
// One rolling-sum vec and one per-sec vec per window, in the same
// order as `Windows::SECS` (24h / 1w / 1m / 1y).
let sums = count.rolling.sum.0.as_array();
let per_sec = self.0.as_mut_array();
for (i, &secs) in Windows::<()>::SECS.iter().enumerate() {
per_sec[i].height.compute_transform(
h,
&sums[i].height,
// NOTE(review): assumes the transform tuple is (height, sum, ..) —
// confirm against `compute_transform`'s closure signature.
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
}
Ok(())
}
}

View File

@@ -0,0 +1,21 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, Windows},
};
impl Vecs {
    /// Open (or create) one `inputs_per_sec_{suffix}` vec per rolling window.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
    ) -> Result<Self> {
        let windows = Windows::try_from_fn(|suffix| {
            let name = format!("inputs_per_sec_{suffix}");
            PerBlock::forced_import(db, &name, version, indexes)
        })?;
        Ok(Self(windows))
    }
}

View File

@@ -0,0 +1,8 @@
use brk_traversable::Traversable;
use brk_types::StoredF32;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, Windows};
/// Inputs-per-second rate, one `PerBlock<StoredF32>` per rolling window,
/// flattened for traversal.
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw>(#[traversable(flatten)] pub Windows<PerBlock<StoredF32, M>>);

View File

@@ -0,0 +1,84 @@
//! Shared per-block-per-type cursor walker used by `outputs/by_type/` and
//! `inputs/by_type/`. The walker iterates blocks and aggregates the
//! per-tx output-type counts; pushing into a particular wrapper is left
//! to the caller.
use brk_error::Result;
use brk_types::TxIndex;
use vecdb::VecIndex;
/// Aggregated per-block counters produced by [`walk_blocks`].
pub(crate) struct BlockAggregate {
/// Total item (output/input) count in the block, summed across all 12
/// per-type slots.
pub entries_all: u64,
/// Item count per output type; 12 slots, one per type.
pub entries_per_type: [u64; 12],
/// Number of txs in the block containing at least one item of any type.
pub txs_all: u64,
/// Number of txs containing at least one item of the given type.
pub txs_per_type: [u64; 12],
}
/// Whether to include the coinbase tx (first tx in each block) in the walk.
#[derive(Clone, Copy)]
pub(crate) enum CoinbasePolicy {
/// Scan every tx in the block, coinbase included.
Include,
/// Start one tx past the block's first, skipping the coinbase.
Skip,
}
/// Iterate the blocks described by `fi_batch` (one first-tx index per
/// block), call `scan_tx` once per tx to obtain its per-output-type counts
/// in a `[u32; 12]` buffer, then hand the block's [`BlockAggregate`] to
/// `store`.
///
/// `entries_all` and `txs_all` sum across all 12 output types; consumers
/// that only care about the 11 spendable types can ignore the op_return
/// slot.
#[inline]
pub(crate) fn walk_blocks(
    fi_batch: &[TxIndex],
    txid_len: usize,
    coinbase: CoinbasePolicy,
    mut scan_tx: impl FnMut(usize, &mut [u32; 12]) -> Result<()>,
    mut store: impl FnMut(BlockAggregate) -> Result<()>,
) -> Result<()> {
    for (block_idx, first_tx) in fi_batch.iter().enumerate() {
        let block_start = first_tx.to_usize();
        // A block ends where the next one begins; the last block runs to
        // the end of the txid table.
        let block_end = match fi_batch.get(block_idx + 1) {
            Some(next_first) => next_first.to_usize(),
            None => txid_len,
        };
        // Skipping the coinbase means starting one tx past the block's first.
        let first_scanned = match coinbase {
            CoinbasePolicy::Include => block_start,
            CoinbasePolicy::Skip => block_start + 1,
        };
        let mut agg = BlockAggregate {
            entries_all: 0,
            entries_per_type: [0u64; 12],
            txs_all: 0,
            txs_per_type: [0u64; 12],
        };
        for tx_pos in first_scanned..block_end {
            let mut per_tx = [0u32; 12];
            scan_tx(tx_pos, &mut per_tx)?;
            let mut saw_item = false;
            // Field borrows are disjoint: per-type slots via iter_mut,
            // the `entries_all` total directly.
            for ((entry_slot, tx_slot), &n) in agg
                .entries_per_type
                .iter_mut()
                .zip(agg.txs_per_type.iter_mut())
                .zip(per_tx.iter())
            {
                if n > 0 {
                    *entry_slot += u64::from(n);
                    *tx_slot += 1;
                    agg.entries_all += u64::from(n);
                    saw_item = true;
                }
            }
            if saw_item {
                agg.txs_all += 1;
            }
        }
        store(agg)?;
    }
    Ok(())
}

View File

@@ -1,125 +0,0 @@
//! Shared per-block per-address-type counters.
//!
//! Used by `outputs/by_type/` (counts outputs per type) and `inputs/by_type/`
//! (counts inputs per type). Walks each block's tx range, calls a scanner
//! callback that fills a `[u32; 12]` per-tx counter, and produces two
//! per-block aggregates in a single pass:
//!
//! - `entry_count` — total number of items (outputs / inputs) per type
//! - `tx_count` — number of txs that contain at least one item of each type
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
/// Per-block scan that simultaneously computes:
/// - `entry_count[type] += per_tx[type]` (sum of items)
/// - `tx_count[type] += 1 if per_tx[type] > 0` (presence flag)
///
/// `scan_tx` is called once per tx with a zeroed `[u32; 12]` buffer that
/// it must fill with the per-type item count for that tx.
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_by_addr_type_block_counts(
entry_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
fi_batch: &[TxIndex],
txid_len: usize,
skip_first_tx: bool,
starting_height: Height,
exit: &Exit,
mut scan_tx: impl FnMut(usize, &mut [u32; 12]) -> Result<()>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = if skip_first_tx { fi + 1 } else { fi };
let mut entries_per_block = [0u64; 12];
let mut txs_per_block = [0u64; 12];
for tx_pos in start_tx..next_fi {
let mut per_tx = [0u32; 12];
scan_tx(tx_pos, &mut per_tx)?;
for (i, &n) in per_tx.iter().enumerate() {
if n > 0 {
entries_per_block[i] += u64::from(n);
txs_per_block[i] += 1;
}
}
}
for otype in OutputType::ADDR_TYPES {
let idx = otype as usize;
entry_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(entries_per_block[idx]));
tx_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(txs_per_block[idx]));
}
if entry_count.p2pkh.block.batch_limit_reached() {
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
}
{
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
for (_, v) in entry_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
for (_, v) in tx_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
Ok(())
}
/// Compute per-type tx-count percent over total tx count, for all 8 address types.
pub(crate) fn compute_by_addr_type_tx_percents(
tx_count: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
for otype in OutputType::ADDR_TYPES {
let source = tx_count.get_unwrap(otype);
tx_percent
.get_mut_unwrap(otype)
.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_indexes.height,
&source.cumulative.height,
&count_total.cumulative.height,
source.sum.as_array().map(|w| &w.height),
count_total.rolling.sum.as_array().map(|w| &w.height),
exit,
)?;
}
Ok(())
}

View File

@@ -11,6 +11,12 @@ pub struct Windows<A> {
impl<A> Windows<A> {
pub const SUFFIXES: [&'static str; 4] = ["24h", "1w", "1m", "1y"];
pub const DAYS: [usize; 4] = [1, 7, 30, 365];
pub const SECS: [f64; 4] = [
Self::DAYS[0] as f64 * 86400.0,
Self::DAYS[1] as f64 * 86400.0,
Self::DAYS[2] as f64 * 86400.0,
Self::DAYS[3] as f64 * 86400.0,
];
pub fn try_from_fn<E>(
mut f: impl FnMut(&str) -> std::result::Result<A, E>,

View File

@@ -1,6 +1,6 @@
pub(crate) mod algo;
mod amount;
mod by_type_counts;
mod block_walker;
mod cache_budget;
mod containers;
pub(crate) mod db_utils;
@@ -9,9 +9,10 @@ mod per_block;
mod per_tx;
mod traits;
mod transform;
mod with_addr_types;
pub(crate) use amount::*;
pub(crate) use by_type_counts::*;
pub(crate) use block_walker::*;
pub(crate) use cache_budget::*;
pub(crate) use containers::*;
pub(crate) use indexes::*;
@@ -19,3 +20,4 @@ pub(crate) use per_block::*;
pub(crate) use per_tx::*;
pub(crate) use traits::*;
pub use transform::*;
pub(crate) use with_addr_types::*;

View File

@@ -5,16 +5,17 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use brk_types::{BasisPoints16, Height, StoredU64, Version};
use vecdb::{BinaryTransform, Database, Exit, ReadableVec, Rw, StorageMode, VecValue};
use crate::{
indexes,
internal::{BpsType, PercentPerBlock, PercentRollingWindows},
internal::{BpsType, PerBlockCumulativeRolling, PercentPerBlock, PercentRollingWindows, RatioU64Bp16},
};
#[derive(Traversable)]
pub struct PercentCumulativeRolling<B: BpsType, M: StorageMode = Rw> {
#[traversable(flatten)]
pub cumulative: PercentPerBlock<B, M>,
#[traversable(flatten)]
pub rolling: PercentRollingWindows<B, M>,
@@ -26,26 +27,6 @@ impl<B: BpsType> PercentCumulativeRolling<B> {
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative =
PercentPerBlock::forced_import(db, &format!("{name}_cumulative"), version, indexes)?;
let rolling =
PercentRollingWindows::forced_import(db, &format!("{name}_sum"), version, indexes)?;
Ok(Self {
cumulative,
rolling,
})
}
/// Alternate constructor that uses the same base name for both the
/// cumulative `PercentPerBlock` and the `PercentRollingWindows`, relying on
/// the window suffix to disambiguate. Useful for preserving legacy disk
/// names where the two variants historically shared a prefix.
pub(crate) fn forced_import_flat(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative = PercentPerBlock::forced_import(db, name, version, indexes)?;
let rolling = PercentRollingWindows::forced_import(db, name, version, indexes)?;
@@ -89,3 +70,26 @@ impl<B: BpsType> PercentCumulativeRolling<B> {
Ok(())
}
}
impl PercentCumulativeRolling<BasisPoints16> {
/// Derive a percent from two `PerBlockCumulativeRolling<StoredU64>`
/// sources (numerator and denominator). Both sources must already have
/// their cumulative and rolling sums computed.
#[inline]
pub(crate) fn compute_count_ratio(
&mut self,
numerator: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
denominator: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
self.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&numerator.cumulative.height,
&denominator.cumulative.height,
numerator.sum.as_array().map(|w| &w.height),
denominator.sum.as_array().map(|w| &w.height),
exit,
)
}
}

View File

@@ -0,0 +1,38 @@
use brk_traversable::Traversable;
use brk_types::Version;
use vecdb::UnaryTransform;
use crate::internal::{
BpsType, LazyPercentPerBlock, LazyPercentRollingWindows, PercentCumulativeRolling,
};
/// Fully lazy variant of `PercentCumulativeRolling` — no stored vecs.
///
/// Mirrors the flat shape of `PercentCumulativeRolling`: cumulative and
/// rolling window fields are both flattened to the same tree level, so
/// consumers see `{ bps, percent, ratio, _24h, _1w, _1m, _1y }`.
#[derive(Clone, Traversable)]
pub struct LazyPercentCumulativeRolling<B: BpsType> {
#[traversable(flatten)]
pub cumulative: LazyPercentPerBlock<B>,
#[traversable(flatten)]
pub rolling: LazyPercentRollingWindows<B>,
}
impl<B: BpsType> LazyPercentCumulativeRolling<B> {
/// Derive from a stored `PercentCumulativeRolling` source via a
/// BPS-to-BPS unary transform applied to both cumulative and rolling.
pub(crate) fn from_source<F: UnaryTransform<B, B>>(
name: &str,
version: Version,
source: &PercentCumulativeRolling<B>,
) -> Self {
let cumulative =
LazyPercentPerBlock::from_percent::<F>(name, version, &source.cumulative);
let rolling = LazyPercentRollingWindows::from_rolling::<F>(name, version, &source.rolling);
Self {
cumulative,
rolling,
}
}
}

View File

@@ -1,6 +1,7 @@
mod base;
mod cumulative_rolling;
mod lazy;
mod lazy_cumulative_rolling;
mod lazy_windows;
mod vec;
mod windows;
@@ -8,6 +9,7 @@ mod windows;
pub use base::*;
pub use cumulative_rolling::*;
pub use lazy::*;
pub use lazy_cumulative_rolling::*;
pub use lazy_windows::*;
pub use vec::*;
pub use windows::*;

View File

@@ -5,14 +5,15 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Indexes, Version};
use brk_types::{Height, Indexes, Sats, Version};
use rayon::prelude::*;
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, EagerVec, Exit, PcoVec, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlock, PerBlockCumulativeRolling, WindowStartVec, Windows},
use crate::{indexes, prices};
use super::{
AmountPerBlock, NumericValue, PerBlock, PerBlockCumulativeRolling, WindowStartVec, Windows,
};
/// `all` aggregate plus per-`AddrType` breakdown.
@@ -171,3 +172,75 @@ where
Ok(())
}
}
impl WithAddrTypes<AmountPerBlock> {
    /// Import (or create) the `all` aggregate plus one `AmountPerBlock` per
    /// address type; per-type vecs are named `{type_name}_{name}`.
    pub(crate) fn forced_import(
        db: &Database,
        name: &str,
        version: Version,
        indexes: &indexes::Vecs,
    ) -> Result<Self> {
        let all = AmountPerBlock::forced_import(db, name, version, indexes)?;
        let by_addr_type = ByAddrType::new_with_name(|type_name| {
            AmountPerBlock::forced_import(db, &format!("{type_name}_{name}"), version, indexes)
        })?;
        Ok(Self { all, by_addr_type })
    }

    /// Shortest stateful (`sats.height`) length across `all` and every
    /// per-type vec — i.e. the height up to which all of them are populated.
    pub(crate) fn min_stateful_len(&self) -> usize {
        self.by_addr_type
            .values()
            .map(|v| v.sats.height.len())
            .min()
            // ByAddrType is non-empty, so `min()` always yields a value.
            .unwrap()
            .min(self.all.sats.height.len())
    }

    /// Parallel mutable iterator over every stateful `sats.height` vec
    /// (`all` first, then each per-type), erased to `dyn AnyStoredVec`.
    pub(crate) fn par_iter_height_mut(
        &mut self,
    ) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
        rayon::iter::once(&mut self.all.sats.height as &mut dyn AnyStoredVec).chain(
            self.by_addr_type
                .par_values_mut()
                .map(|v| &mut v.sats.height as &mut dyn AnyStoredVec),
        )
    }

    /// Reset both sats and cents height vecs for `all` and every per-type vec.
    pub(crate) fn reset_height(&mut self) -> Result<()> {
        self.all.sats.height.reset()?;
        self.all.cents.height.reset()?;
        for v in self.by_addr_type.values_mut() {
            v.sats.height.reset()?;
            v.cents.height.reset()?;
        }
        Ok(())
    }

    /// Push the stateful sats value for `all` and each per-type. Cents are
    /// derived post-hoc from sats × price in [`Self::compute_rest`].
    ///
    /// NOTE(review): `per_type` is zipped against the per-type vecs, so it is
    /// expected to yield exactly one value per address type, in the same
    /// order as `ByAddrType::values_mut` — confirm at call sites.
    #[inline(always)]
    pub(crate) fn push_height<U>(&mut self, total: U, per_type: impl IntoIterator<Item = U>)
    where
        U: Into<Sats>,
    {
        self.all.sats.height.push(total.into());
        for (v, value) in self.by_addr_type.values_mut().zip(per_type) {
            v.sats.height.push(value.into());
        }
    }

    /// Derive cents (and thus lazy btc/usd) for `all` and every per-type vec
    /// from the stateful sats values × spot price.
    pub(crate) fn compute_rest(
        &mut self,
        max_from: Height,
        prices: &prices::Vecs,
        exit: &Exit,
    ) -> Result<()> {
        self.all.compute(prices, max_from, exit)?;
        for v in self.by_addr_type.values_mut() {
            v.compute(prices, max_from, exit)?;
        }
        Ok(())
    }
}

View File

@@ -23,7 +23,6 @@ mod mining;
mod outputs;
mod pools;
pub mod prices;
mod scripts;
mod supply;
mod transactions;
@@ -32,7 +31,6 @@ pub struct Computer<M: StorageMode = Rw> {
pub blocks: Box<blocks::Vecs<M>>,
pub mining: Box<mining::Vecs<M>>,
pub transactions: Box<transactions::Vecs<M>>,
pub scripts: Box<scripts::Vecs<M>>,
pub cointime: Box<cointime::Vecs<M>>,
pub constants: Box<constants::Vecs>,
pub indexes: Box<indexes::Vecs<M>>,
@@ -89,8 +87,8 @@ impl Computer {
let cached_starts = blocks.lookback.cached_window_starts();
let (inputs, outputs, mining, transactions, scripts, pools, cointime) = timed(
"Imported inputs/outputs/mining/tx/scripts/pools/cointime",
let (inputs, outputs, mining, transactions, pools, cointime) = timed(
"Imported inputs/outputs/mining/tx/pools/cointime",
|| {
thread::scope(|s| -> Result<_> {
let inputs_handle = big_thread().spawn_scoped(s, || -> Result<_> {
@@ -130,15 +128,6 @@ impl Computer {
)?))
})?;
let scripts_handle = big_thread().spawn_scoped(s, || -> Result<_> {
Ok(Box::new(scripts::Vecs::forced_import(
&computed_path,
VERSION,
&indexes,
&cached_starts,
)?))
})?;
let pools_handle = big_thread().spawn_scoped(s, || -> Result<_> {
Ok(Box::new(pools::Vecs::forced_import(
&computed_path,
@@ -159,18 +148,9 @@ impl Computer {
let outputs = outputs_handle.join().unwrap()?;
let mining = mining_handle.join().unwrap()?;
let transactions = transactions_handle.join().unwrap()?;
let scripts = scripts_handle.join().unwrap()?;
let pools = pools_handle.join().unwrap()?;
Ok((
inputs,
outputs,
mining,
transactions,
scripts,
pools,
cointime,
))
Ok((inputs, outputs, mining, transactions, pools, cointime))
})
},
)?;
@@ -235,7 +215,6 @@ impl Computer {
blocks,
mining,
transactions,
scripts,
constants,
indicators,
investing,
@@ -261,7 +240,6 @@ impl Computer {
blocks::DB_NAME,
mining::DB_NAME,
transactions::DB_NAME,
scripts::DB_NAME,
cointime::DB_NAME,
indicators::DB_NAME,
indexes::DB_NAME,
@@ -340,6 +318,8 @@ impl Computer {
inputs_result?;
prices_result?;
// market, outputs, and (transactions → mining) are pairwise
// independent. Run all three in parallel.
let market = scope.spawn(|| {
timed("Computed market", || {
self.market.compute(
@@ -352,36 +332,18 @@ impl Computer {
})
});
timed("Computed scripts", || {
self.scripts
.compute(indexer, &self.prices, &starting_indexes, exit)
})?;
timed("Computed outputs", || {
self.outputs.compute(
indexer,
&self.indexes,
&self.inputs,
&self.scripts,
&self.blocks,
&starting_indexes,
exit,
)
})?;
let tx_mining = scope.spawn(|| -> Result<()> {
timed("Computed transactions", || {
self.transactions.compute(
indexer,
&self.indexes,
&self.blocks,
&self.inputs,
&self.outputs,
&self.prices,
&starting_indexes,
exit,
)
})?;
timed("Computed mining", || {
self.mining.compute(
indexer,
@@ -392,8 +354,22 @@ impl Computer {
&starting_indexes,
exit,
)
})
});
timed("Computed outputs", || {
self.outputs.compute(
indexer,
&self.indexes,
&self.inputs,
&self.blocks,
&self.prices,
&starting_indexes,
exit,
)
})?;
tx_mining.join().unwrap()?;
market.join().unwrap()?;
Ok(())
})?;
@@ -433,7 +409,6 @@ impl Computer {
&self.indexes,
&self.inputs,
&self.outputs,
&self.scripts,
&self.transactions,
&self.blocks,
&self.prices,
@@ -465,7 +440,7 @@ impl Computer {
timed("Computed supply", || {
self.supply.compute(
&self.scripts,
&self.outputs,
&self.blocks,
&self.mining,
&self.transactions,
@@ -535,7 +510,6 @@ impl_iter_named!(
blocks,
mining,
transactions,
scripts,
cointime,
constants,
indicators,

View File

@@ -125,14 +125,6 @@ impl Vecs {
exit,
)?;
self.subsidy_dominance
.compute_binary::<Sats, Sats, RatioSatsBp16>(
starting_indexes.height,
&self.subsidy.cumulative.sats.height,
&self.coinbase.cumulative.sats.height,
exit,
)?;
self.fee_to_subsidy_ratio
.compute_binary::<Dollars, Dollars, RatioDollarsBp32, _, _>(
starting_indexes.height,

View File

@@ -7,8 +7,8 @@ use crate::{
indexes,
internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, OneMinusBp16, PercentCumulativeRolling, PercentPerBlock,
RatioRollingWindows, WindowStartVec, Windows,
LazyPercentCumulativeRolling, OneMinusBp16, PercentCumulativeRolling, RatioRollingWindows,
WindowStartVec, Windows,
},
};
@@ -20,12 +20,12 @@ impl Vecs {
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let fee_dominance =
PercentCumulativeRolling::forced_import_flat(db, "fee_dominance", version, indexes)?;
PercentCumulativeRolling::forced_import(db, "fee_dominance", version, indexes)?;
let subsidy_dominance_rolling = LazyPercentRollingWindows::from_rolling::<OneMinusBp16>(
let subsidy_dominance = LazyPercentCumulativeRolling::from_source::<OneMinusBp16>(
"subsidy_dominance",
version,
&fee_dominance.rolling,
&fee_dominance,
);
Ok(Self {
@@ -52,13 +52,7 @@ impl Vecs {
indexes,
)?,
fee_dominance,
subsidy_dominance: PercentPerBlock::forced_import(
db,
"subsidy_dominance",
version,
indexes,
)?,
subsidy_dominance_rolling,
subsidy_dominance,
fee_to_subsidy_ratio: RatioRollingWindows::forced_import(
db,
"fee_to_subsidy_ratio",

View File

@@ -4,7 +4,7 @@ use vecdb::{EagerVec, PcoVec, Rw, StorageMode};
use crate::internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, PercentCumulativeRolling, PercentPerBlock, RatioRollingWindows,
LazyPercentCumulativeRolling, PercentCumulativeRolling, RatioRollingWindows,
};
#[derive(Traversable)]
@@ -17,9 +17,7 @@ pub struct Vecs<M: StorageMode = Rw> {
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentCumulativeRolling<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance_rolling: LazyPercentRollingWindows<BasisPoints16>,
pub subsidy_dominance: LazyPercentCumulativeRolling<BasisPoints16>,
#[traversable(wrap = "fees", rename = "to_subsidy_ratio")]
pub fee_to_subsidy_ratio: RatioRollingWindows<BasisPoints32, M>,
}

View File

@@ -3,15 +3,11 @@ use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
use super::{Vecs, WithOutputTypes};
use crate::internal::{CoinbasePolicy, PerBlockCumulativeRolling, walk_blocks};
impl Vecs {
/// Phase 1: walk outputs and populate `output_count` + `tx_count`.
/// Independent of transactions, can run alongside other outputs work.
pub(crate) fn compute_counts(
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
@@ -22,35 +18,21 @@ impl Vecs {
+ indexer.vecs.transactions.first_txout_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.output_count.iter_mut() {
v.block
self.output_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
self.tx_count
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self
.output_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
.min_stateful_len()
.min(self.tx_count.min_stateful_len());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.output_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
if skip < end {
self.output_count.truncate_if_needed_at(skip)?;
self.tx_count.truncate_if_needed_at(skip)?;
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
@@ -59,14 +41,10 @@ impl Vecs {
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
compute_by_addr_type_block_counts(
&mut self.output_count,
&mut self.tx_count,
walk_blocks(
&fi_batch,
txid_len,
false,
starting_indexes.height,
exit,
CoinbasePolicy::Include,
|tx_pos, per_tx| {
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
@@ -82,23 +60,51 @@ impl Vecs {
}
Ok(())
},
)
|agg| {
push_block(&mut self.output_count, agg.entries_all, &agg.entries_per_type);
push_block(&mut self.tx_count, agg.txs_all, &agg.txs_per_type);
if self.output_count.all.block.batch_limit_reached() {
let _lock = exit.lock();
self.output_count.write()?;
self.tx_count.write()?;
}
Ok(())
},
)?;
{
let _lock = exit.lock();
self.output_count.write()?;
self.tx_count.write()?;
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute` (depends on tx count totals).
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
self.output_count
.compute_rest(starting_indexes.height, exit)?;
self.tx_count
.compute_rest(starting_indexes.height, exit)?;
}
for (otype, source) in self.tx_count.by_type.iter_typed() {
self.tx_percent.get_mut(otype).compute_count_ratio(
source,
&self.tx_count.all,
starting_indexes.height,
exit,
)
)?;
}
Ok(())
}
}
/// Push one block's counts into a `WithOutputTypes` metric: `total` goes to
/// the `all` aggregate, and `per_type` is indexed by `OutputType as usize`
/// (12 slots) to fill each per-type vec.
#[inline]
fn push_block(
    metric: &mut WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
    total: u64,
    per_type: &[u64; 12],
) {
    metric.all.block.push(StoredU64::from(total));
    // iter_typed_mut yields (OutputType, vec) pairs, so each vec reads the
    // slot matching its own type's discriminant.
    for (otype, vec) in metric.by_type.iter_typed_mut() {
        vec.block.push(StoredU64::from(per_type[otype as usize]));
    }
}

View File

@@ -1,12 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::ByType;
use brk_error::Result;
use brk_types::Version;
use brk_types::{StoredU64, Version};
use vecdb::Database;
use super::Vecs;
use super::{Vecs, WithOutputTypes};
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows,
},
};
impl Vecs {
@@ -16,33 +18,39 @@ impl Vecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
output_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
let output_count = WithOutputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
&format!("{name}_output_count"),
"output_count_bis",
|t| format!("{t}_output_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
)?;
let tx_count = WithOutputTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import_with(
db,
&format!("tx_count_with_{name}_out"),
"tx_count_bis",
|t| format!("tx_count_with_{t}_output"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
)?;
let tx_percent = ByType::try_new(|_, name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out_rel_to_all"),
&format!("tx_percent_with_{name}_output"),
version,
indexes,
)
})?,
})?;
Ok(Self {
output_count,
tx_count,
tx_percent,
})
}
}

View File

@@ -1,5 +1,7 @@
mod compute;
mod import;
mod vecs;
mod with_output_types;
pub use vecs::Vecs;
pub(crate) use with_output_types::WithOutputTypes;

View File

@@ -1,16 +1,14 @@
use brk_cohort::ByAddrType;
use brk_cohort::ByType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use super::WithOutputTypes;
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total output count (granular).
pub output_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
pub output_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_count: WithOutputTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub tx_percent: ByType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,92 @@
//! Generic `all` + per-`OutputType` container (12 output types, including
//! op_return). Used by `outputs/by_type/`. Mirrors `WithAddrTypes` and
//! `WithInputTypes`.
use brk_cohort::ByType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, Exit, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlockCumulativeRolling, WindowStartVec, Windows},
};
/// `all` aggregate plus per-`OutputType` breakdown across all 12 output
/// types (spendable + op_return).
#[derive(Clone, Traversable)]
pub struct WithOutputTypes<T> {
pub all: T,
#[traversable(flatten)]
pub by_type: ByType<T>,
}
impl<T, C> WithOutputTypes<PerBlockCumulativeRolling<T, C>>
where
T: NumericValue + JsonSchema + Into<C>,
C: NumericValue + JsonSchema,
{
pub(crate) fn forced_import_with(
db: &Database,
all_name: &str,
per_type_name: impl Fn(&str) -> String,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let make = |name: &str| {
PerBlockCumulativeRolling::forced_import(db, name, version, indexes, cached_starts)
};
Ok(Self {
all: make(all_name)?,
by_type: ByType::try_new(|_, name| make(&per_type_name(name)))?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.by_type
.iter()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.all.block.len())
}
pub(crate) fn write(&mut self) -> Result<()> {
self.all.block.write()?;
for v in self.by_type.iter_mut() {
v.block.write()?;
}
Ok(())
}
pub(crate) fn validate_and_truncate(
&mut self,
dep_version: Version,
at_height: Height,
) -> Result<()> {
self.all.block.validate_and_truncate(dep_version, at_height)?;
for v in self.by_type.iter_mut() {
v.block.validate_and_truncate(dep_version, at_height)?;
}
Ok(())
}
pub(crate) fn truncate_if_needed_at(&mut self, len: usize) -> Result<()> {
self.all.block.truncate_if_needed_at(len)?;
for v in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(len)?;
}
Ok(())
}
pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
self.all.compute_rest(max_from, exit)?;
for v in self.by_type.iter_mut() {
v.compute_rest(max_from, exit)?;
}
Ok(())
}
}

View File

@@ -4,7 +4,7 @@ use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
use crate::{blocks, indexes, inputs, scripts};
use crate::{blocks, indexes, inputs, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -13,19 +13,24 @@ impl Vecs {
indexer: &Indexer,
indexes: &indexes::Vecs,
inputs: &inputs::Vecs,
scripts: &scripts::Vecs,
blocks: &blocks::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
self.count.compute(
indexer,
indexes,
self.count
.compute(indexer, indexes, blocks, starting_indexes, exit)?;
self.per_sec
.compute(&self.count, starting_indexes, exit)?;
self.value
.compute(indexer, prices, starting_indexes, exit)?;
self.by_type.compute(indexer, starting_indexes, exit)?;
self.unspent.compute(
&self.count,
&inputs.count,
&scripts.count,
blocks,
&self.by_type,
starting_indexes,
exit,
)?;

View File

@@ -1,19 +1,16 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::{Height, Indexes, StoredU64};
use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
use crate::{blocks, indexes, inputs, scripts};
use crate::{blocks, indexes};
impl Vecs {
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
inputs_count: &inputs::CountVecs,
scripts_count: &scripts::CountVecs,
blocks: &blocks::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
@@ -28,41 +25,6 @@ impl Vecs {
exit,
0,
)?;
self.unspent.height.compute_transform3(
starting_indexes.height,
&self.total.cumulative.height,
&inputs_count.cumulative.height,
&scripts_count.op_return.cumulative.height,
|(h, output_count, input_count, op_return_count, ..)| {
let block_count = u64::from(h + 1_usize);
// -1 > genesis output is unspendable
let mut utxo_count =
*output_count - (*input_count - block_count) - *op_return_count - 1;
// txid dup: e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468
// Block 91_722 https://mempool.space/block/00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e
// Block 91_880 https://mempool.space/block/00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721
//
// txid dup: d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599
// Block 91_812 https://mempool.space/block/00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f
// Block 91_842 https://mempool.space/block/00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec
//
// Warning: Dups invalidate the previous coinbase according to
// https://chainquery.com/bitcoin-cli/gettxoutsetinfo
if h >= Height::new(91_842) {
utxo_count -= 1;
}
if h >= Height::new(91_880) {
utxo_count -= 1;
}
(h, StoredU64::from(utxo_count))
},
exit,
)?;
Ok(())
}
}

View File

@@ -5,7 +5,7 @@ use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, PerBlockAggregated, WindowStartVec, Windows},
internal::{PerBlockAggregated, WindowStartVec, Windows},
};
impl Vecs {
@@ -23,7 +23,6 @@ impl Vecs {
indexes,
cached_starts,
)?,
unspent: PerBlock::forced_import(db, "utxo_count_bis", version, indexes)?,
})
}
}

View File

@@ -2,10 +2,9 @@ use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, PerBlockAggregated};
use crate::internal::PerBlockAggregated;
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub total: PerBlockAggregated<StoredU64, M>,
pub unspent: PerBlock<StoredU64, M>,
}

View File

@@ -11,7 +11,7 @@ use crate::{
},
};
use super::{ByTypeVecs, CountVecs, SpentVecs, Vecs};
use super::{ByTypeVecs, CountVecs, PerSecVecs, SpentVecs, UnspentVecs, ValueVecs, Vecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -25,13 +25,19 @@ impl Vecs {
let spent = SpentVecs::forced_import(&db, version)?;
let count = CountVecs::forced_import(&db, version, indexes, cached_starts)?;
let per_sec = PerSecVecs::forced_import(&db, version, indexes)?;
let unspent = UnspentVecs::forced_import(&db, version, indexes)?;
let by_type = ByTypeVecs::forced_import(&db, version, indexes, cached_starts)?;
let value = ValueVecs::forced_import(&db, version, indexes)?;
let this = Self {
db,
spent,
count,
per_sec,
unspent,
by_type,
value,
};
finalize_db(&this.db, &this)?;
Ok(this)

View File

@@ -1,6 +1,9 @@
pub mod by_type;
pub mod count;
pub mod per_sec;
pub mod spent;
pub mod unspent;
pub mod value;
mod compute;
mod import;
@@ -10,7 +13,10 @@ use vecdb::{Database, Rw, StorageMode};
pub use by_type::Vecs as ByTypeVecs;
pub use count::Vecs as CountVecs;
pub use per_sec::Vecs as PerSecVecs;
pub use spent::Vecs as SpentVecs;
pub use unspent::Vecs as UnspentVecs;
pub use value::Vecs as ValueVecs;
pub const DB_NAME: &str = "outputs";
@@ -21,5 +27,8 @@ pub struct Vecs<M: StorageMode = Rw> {
pub spent: SpentVecs<M>,
pub count: CountVecs<M>,
pub per_sec: PerSecVecs<M>,
pub unspent: UnspentVecs<M>,
pub by_type: ByTypeVecs<M>,
pub value: ValueVecs<M>,
}

View File

@@ -0,0 +1,28 @@
use brk_error::Result;
use brk_types::{Indexes, StoredF32};
use vecdb::Exit;
use super::Vecs;
use crate::{internal::Windows, outputs::CountVecs};
impl Vecs {
pub(crate) fn compute(
&mut self,
count: &CountVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let h = starting_indexes.height;
let sums = count.total.rolling.sum.0.as_array();
let per_sec = self.0.as_mut_array();
for (i, &secs) in Windows::<()>::SECS.iter().enumerate() {
per_sec[i].height.compute_transform(
h,
&sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
}
Ok(())
}
}

View File

@@ -0,0 +1,21 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlock, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("outputs_per_sec_{suffix}"), version, indexes)
})?))
}
}

View File

@@ -0,0 +1,8 @@
use brk_traversable::Traversable;
use brk_types::StoredF32;
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlock, Windows};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw>(#[traversable(flatten)] pub Windows<PerBlock<StoredF32, M>>);

View File

@@ -0,0 +1,49 @@
use brk_error::Result;
use brk_types::{Height, Indexes, StoredU64};
use vecdb::Exit;
use super::Vecs;
use crate::{
inputs,
internal::PerBlockCumulativeRolling,
outputs::{ByTypeVecs, CountVecs},
};
impl Vecs {
pub(crate) fn compute(
&mut self,
count: &CountVecs,
inputs_count: &inputs::CountVecs,
by_type: &ByTypeVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let op_return: &PerBlockCumulativeRolling<StoredU64, StoredU64> =
&by_type.output_count.by_type.unspendable.op_return;
self.count.height.compute_transform3(
starting_indexes.height,
&count.total.cumulative.height,
&inputs_count.cumulative.height,
&op_return.cumulative.height,
|(h, output_count, input_count, op_return_count, ..)| {
let block_count = u64::from(h + 1_usize);
// -1 > genesis output is unspendable
let mut utxo_count =
*output_count - (*input_count - block_count) - *op_return_count - 1;
// BIP30 duplicate txid corrections
if h >= Height::new(91_842) {
utxo_count -= 1;
}
if h >= Height::new(91_880) {
utxo_count -= 1;
}
(h, StoredU64::from(utxo_count))
},
exit,
)?;
Ok(())
}
}

View File

@@ -0,0 +1,18 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{indexes, internal::PerBlock};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self {
count: PerBlock::forced_import(db, "utxo_count_bis", version, indexes)?,
})
}
}

View File

@@ -0,0 +1,11 @@
use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::PerBlock;
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// UTXO count per block: `total - inputs - op_return - genesis`.
pub count: PerBlock<StoredU64, M>,
}

View File

@@ -1,32 +0,0 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use crate::prices;
use super::Vecs;
impl Vecs {
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
self.count.compute(indexer, starting_indexes, exit)?;
self.value
.compute(indexer, prices, starting_indexes, exit)?;
let exit = exit.clone();
self.db.run_bg(move |db| {
let _lock = exit.lock();
db.compact_deferred_default()
});
Ok(())
}
}

View File

@@ -1,147 +0,0 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use super::Vecs;
impl Vecs {
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.p2a.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2a.first_index,
&indexer.vecs.addrs.p2a.bytes,
exit,
)?)
})?;
self.p2ms.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.p2ms.first_index,
&indexer.vecs.scripts.p2ms.to_tx_index,
exit,
)?)
})?;
self.p2pk33.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pk33.first_index,
&indexer.vecs.addrs.p2pk33.bytes,
exit,
)?)
})?;
self.p2pk65.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pk65.first_index,
&indexer.vecs.addrs.p2pk65.bytes,
exit,
)?)
})?;
self.p2pkh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2pkh.first_index,
&indexer.vecs.addrs.p2pkh.bytes,
exit,
)?)
})?;
self.p2sh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2sh.first_index,
&indexer.vecs.addrs.p2sh.bytes,
exit,
)?)
})?;
self.p2tr.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2tr.first_index,
&indexer.vecs.addrs.p2tr.bytes,
exit,
)?)
})?;
self.p2wpkh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2wpkh.first_index,
&indexer.vecs.addrs.p2wpkh.bytes,
exit,
)?)
})?;
self.p2wsh.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addrs.p2wsh.first_index,
&indexer.vecs.addrs.p2wsh.bytes,
exit,
)?)
})?;
// addr_output_count = sum of the 8 address-type per-block counts.
// Lives here (not in addr/) because every consumer that asks "what
// fraction of address outputs are X" needs it as the denominator.
self.addr_output_count.block.compute_sum_of_others(
starting_indexes.height,
&[
&self.p2pk65.block,
&self.p2pk33.block,
&self.p2pkh.block,
&self.p2sh.block,
&self.p2wpkh.block,
&self.p2wsh.block,
&self.p2tr.block,
&self.p2a.block,
],
exit,
)?;
self.addr_output_count
.compute_rest(starting_indexes.height, exit)?;
self.op_return.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.op_return.first_index,
&indexer.vecs.scripts.op_return.to_tx_index,
exit,
)?)
})?;
self.unknown_output
.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.unknown.first_index,
&indexer.vecs.scripts.unknown.to_tx_index,
exit,
)?)
})?;
self.empty_output
.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.empty.first_index,
&indexer.vecs.scripts.empty.to_tx_index,
exit,
)?)
})?;
Ok(())
}
}

View File

@@ -1,121 +0,0 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Opens (or creates) every per-script-type count vec in `db`.
    ///
    /// All thirteen vecs share the same import parameters and differ only
    /// by name, so the repeated call is factored into one local closure
    /// instead of thirteen copy-pasted invocations.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let mut import = |name: &str| {
            PerBlockCumulativeRolling::forced_import(db, name, version, indexes, cached_starts)
        };
        Ok(Self {
            p2a: import("p2a_count")?,
            p2ms: import("p2ms_count")?,
            p2pk33: import("p2pk33_count")?,
            p2pk65: import("p2pk65_count")?,
            p2pkh: import("p2pkh_count")?,
            p2sh: import("p2sh_count")?,
            p2tr: import("p2tr_count")?,
            p2wpkh: import("p2wpkh_count")?,
            p2wsh: import("p2wsh_count")?,
            // Sum of the 8 address-type per-block counts; the denominator
            // for any "fraction of address outputs" metric.
            addr_output_count: import("addr_output_count")?,
            op_return: import("op_return_count")?,
            empty_output: import("empty_output_count")?,
            unknown_output: import("unknown_output_count")?,
        })
    }
}

View File

@@ -1,24 +0,0 @@
use brk_traversable::Traversable;
use brk_types::StoredU64;
use vecdb::{Rw, StorageMode};
use crate::internal::PerBlockCumulativeRolling;
/// Per-script-type output-count vecs, one rolling/cumulative series per
/// type, generic over the storage mode `M` (read-write by default).
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
    // One per-block count series per spendable script type.
    pub p2a: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2ms: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2pk33: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2pk65: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2pkh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2sh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2tr: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2wpkh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub p2wsh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    /// Sum of the 8 address-type per-block counts. Useful as a denominator
    /// for any "fraction of address outputs that …" metric.
    pub addr_output_count: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    // Counts for the non-address output kinds.
    pub op_return: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub empty_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
    pub unknown_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
}

View File

@@ -1,31 +0,0 @@
use std::path::Path;
use brk_error::Result;
use brk_types::Version;
use crate::{
indexes,
internal::db_utils::{finalize_db, open_db},
};
use super::{CountVecs, ValueVecs, Vecs};
use crate::internal::{WindowStartVec, Windows};
impl Vecs {
    /// Opens (or creates) the scripts database under `parent_path`,
    /// imports the count and value vec groups into it, then finalizes
    /// the database before handing back the assembled struct.
    pub(crate) fn forced_import(
        parent_path: &Path,
        parent_version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let database = open_db(parent_path, super::DB_NAME, 1_000_000)?;
        let count = CountVecs::forced_import(&database, parent_version, indexes, cached_starts)?;
        let value = ValueVecs::forced_import(&database, parent_version, indexes)?;
        let imported = Self {
            db: database,
            count,
            value,
        };
        finalize_db(&imported.db, &imported)?;
        Ok(imported)
    }
}

View File

@@ -1,22 +0,0 @@
pub mod count;
pub mod value;
mod compute;
mod import;
use brk_traversable::Traversable;
use vecdb::{Database, Rw, StorageMode};
pub use count::Vecs as CountVecs;
pub use value::Vecs as ValueVecs;
pub const DB_NAME: &str = "scripts";
/// Script-related vec groups (per-type counts and values), all backed by
/// the "scripts" database.
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
    /// Owning handle to the backing database; excluded from traversal.
    #[traversable(skip)]
    pub(crate) db: Database,
    pub count: CountVecs<M>,
    pub value: ValueVecs<M>,
}

View File

@@ -3,12 +3,12 @@ use brk_types::{Indexes, Sats};
use vecdb::{Exit, VecIndex};
use super::Vecs;
use crate::{mining, prices, scripts};
use crate::{mining, outputs, prices};
impl Vecs {
pub(crate) fn compute(
&mut self,
scripts: &scripts::Vecs,
outputs: &outputs::Vecs,
mining: &mining::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
@@ -18,7 +18,7 @@ impl Vecs {
.compute_with(starting_indexes.height, prices, exit, |sats| {
Ok(sats.compute_transform2(
starting_indexes.height,
&scripts.value.op_return.block.sats,
&outputs.value.op_return.block.sats,
&mining.rewards.unclaimed.block.sats,
|(h, op_return, unclaimed, ..)| {
let genesis = if h.to_usize() == 0 {

View File

@@ -6,13 +6,13 @@ use vecdb::Exit;
const INITIAL_SUBSIDY: f64 = Sats::ONE_BTC_U64 as f64 * 50.0;
use super::Vecs;
use crate::{blocks, distribution, mining, prices, scripts, transactions};
use crate::{blocks, distribution, mining, outputs, prices, transactions};
impl Vecs {
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute(
&mut self,
scripts: &scripts::Vecs,
outputs: &outputs::Vecs,
blocks: &blocks::Vecs,
mining: &mining::Vecs,
transactions: &transactions::Vecs,
@@ -25,7 +25,7 @@ impl Vecs {
// 1. Compute burned/unspendable supply
self.burned
.compute(scripts, mining, prices, starting_indexes, exit)?;
.compute(outputs, mining, prices, starting_indexes, exit)?;
// 2. Compute inflation rate: (supply[h] / supply[1y_ago]) - 1
// Skip when lookback supply <= first block (50 BTC = 5B sats),

View File

@@ -3,8 +3,8 @@ use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use super::{Vecs, type_counts::compute_type_percents};
use crate::{blocks, indexes, inputs, outputs, prices};
use super::Vecs;
use crate::{blocks, indexes, inputs, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -14,14 +14,13 @@ impl Vecs {
indexes: &indexes::Vecs,
blocks: &blocks::Vecs,
inputs: &inputs::Vecs,
outputs: &outputs::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
let (r1, (r2, (r3, (r4, r5)))) = rayon::join(
let (r1, (r2, r3)) = rayon::join(
|| {
self.count
.compute(indexer, &blocks.lookback, starting_indexes, exit)
@@ -29,56 +28,13 @@ impl Vecs {
|| {
rayon::join(
|| self.versions.compute(indexer, starting_indexes, exit),
|| {
rayon::join(
|| self.size.compute(indexer, indexes, starting_indexes, exit),
|| {
rayon::join(
|| {
self.input_types
.compute(indexer, starting_indexes, exit)
},
|| {
self.output_types
.compute(indexer, starting_indexes, exit)
},
)
},
)
},
)
},
);
r1?;
r2?;
r3?;
r4?;
r5?;
let count_total = &self.count.total;
let (input_types, output_types) = (&mut self.input_types, &mut self.output_types);
let (r6, r7) = rayon::join(
|| {
compute_type_percents(
&input_types.by_type,
&mut input_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
|| {
compute_type_percents(
&output_types.by_type,
&mut output_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
);
r6?;
r7?;
self.fees.compute(
indexer,
@@ -95,8 +51,6 @@ impl Vecs {
prices,
&self.count,
&self.fees,
&inputs.count,
&outputs.count,
starting_indexes,
exit,
)?;

View File

@@ -12,9 +12,7 @@ use crate::{
},
};
use super::{
CountVecs, FeesVecs, InputTypesVecs, OutputTypesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs,
};
use super::{CountVecs, FeesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -32,8 +30,6 @@ impl Vecs {
let fees = FeesVecs::forced_import(&db, version, indexes)?;
let versions = VersionsVecs::forced_import(&db, version, indexes, cached_starts)?;
let volume = VolumeVecs::forced_import(&db, version, indexes, cached_starts)?;
let input_types = InputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let output_types = OutputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self {
db,
@@ -42,8 +38,6 @@ impl Vecs {
fees,
versions,
volume,
input_types,
output_types,
};
finalize_db(&this.db, &this)?;
Ok(this)

View File

@@ -1,68 +0,0 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
    /// Recomputes, per block, how many transactions have at least one
    /// input of each address type, resuming from wherever the per-type
    /// vecs left off.
    pub(crate) fn compute(
        &mut self,
        indexer: &Indexer,
        starting_indexes: &Indexes,
        exit: &Exit,
    ) -> Result<()> {
        // Combined version of every indexer vec this scan reads; any bump
        // invalidates previously computed data.
        let dep_version = indexer.vecs.inputs.output_type.version()
            + indexer.vecs.transactions.first_tx_index.version()
            + indexer.vecs.transactions.first_txin_index.version()
            + indexer.vecs.transactions.txid.version();
        for (_, v) in self.by_type.iter_mut() {
            v.block
                .validate_and_truncate(dep_version, starting_indexes.height)?;
        }
        // Resume from the shortest per-type vec, then truncate the others
        // down to it so all types stay in lock-step.
        let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
        let first_tx_index = &indexer.vecs.transactions.first_tx_index;
        let end = first_tx_index.len();
        if skip >= end {
            return Ok(());
        }
        for (_, v) in self.by_type.iter_mut() {
            v.block.truncate_if_needed_at(skip)?;
        }
        let fi_batch = first_tx_index.collect_range_at(skip, end);
        let txid_len = indexer.vecs.transactions.txid.len();
        let total_txin_len = indexer.vecs.inputs.output_type.len();
        // Sequential cursors over the per-input type vec and the per-tx
        // first-input index; advanced monotonically by the closure below.
        let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
        let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
        compute_type_counts(
            &mut self.by_type,
            &fi_batch,
            txid_len,
            // skip_first_tx: each block's first tx is excluded — presumably
            // the coinbase, which has no real (prevout-bearing) inputs;
            // confirm against the indexer's tx layout.
            true,
            starting_indexes.height,
            exit,
            |tx_pos| {
                // Input range [fi_in, next_fi_in) of the tx at `tx_pos`;
                // the last tx is bounded by the total input count.
                let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
                let next_fi_in = if tx_pos + 1 < txid_len {
                    fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
                } else {
                    total_txin_len
                };
                // Bitset of distinct output types seen among this tx's
                // inputs (bit = type discriminant).
                let mut seen: u16 = 0;
                itype_cursor.advance(fi_in - itype_cursor.position());
                for _ in fi_in..next_fi_in {
                    seen |= 1u16 << (itype_cursor.next().unwrap() as u8);
                }
                Ok(seen)
            },
        )
    }
}

View File

@@ -1,39 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Imports both per-address-type vec groups, deriving each vec's
    /// on-disk name from the address-type name.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let by_type = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_in");
            PerBlockCumulativeRolling::forced_import(db, &vec_name, version, indexes, cached_starts)
        })?;
        let percent = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_in_rel_to_all");
            PercentCumulativeRolling::forced_import(db, &vec_name, version, indexes)
        })?;
        Ok(Self { by_type, percent })
    }
}

View File

@@ -1,12 +0,0 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
/// Per-address-type counts of transactions with at least one input of
/// that type (`by_type`), plus their share of all transactions in basis
/// points (`percent`).
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
    pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
    pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,12 +1,9 @@
pub mod count;
pub mod fees;
pub mod input_types;
pub mod output_types;
pub mod size;
pub mod versions;
pub mod volume;
mod type_counts;
mod compute;
mod import;
@@ -15,8 +12,6 @@ use vecdb::{Database, Rw, StorageMode};
pub use count::Vecs as CountVecs;
pub use fees::Vecs as FeesVecs;
pub use input_types::Vecs as InputTypesVecs;
pub use output_types::Vecs as OutputTypesVecs;
pub use size::Vecs as SizeVecs;
pub use versions::Vecs as VersionsVecs;
pub use volume::Vecs as VolumeVecs;
@@ -33,6 +28,4 @@ pub struct Vecs<M: StorageMode = Rw> {
pub fees: FeesVecs<M>,
pub versions: VersionsVecs<M>,
pub volume: VolumeVecs<M>,
pub input_types: InputTypesVecs<M>,
pub output_types: OutputTypesVecs<M>,
}

View File

@@ -1,68 +0,0 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
    /// Recomputes, per block, how many transactions have at least one
    /// output of each address type, resuming from wherever the per-type
    /// vecs left off.
    pub(crate) fn compute(
        &mut self,
        indexer: &Indexer,
        starting_indexes: &Indexes,
        exit: &Exit,
    ) -> Result<()> {
        // Combined version of every indexer vec this scan reads; any bump
        // invalidates previously computed data.
        let dep_version = indexer.vecs.outputs.output_type.version()
            + indexer.vecs.transactions.first_tx_index.version()
            + indexer.vecs.transactions.first_txout_index.version()
            + indexer.vecs.transactions.txid.version();
        for (_, v) in self.by_type.iter_mut() {
            v.block
                .validate_and_truncate(dep_version, starting_indexes.height)?;
        }
        // Resume from the shortest per-type vec, then truncate the others
        // down to it so all types stay in lock-step.
        let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
        let first_tx_index = &indexer.vecs.transactions.first_tx_index;
        let end = first_tx_index.len();
        if skip >= end {
            return Ok(());
        }
        for (_, v) in self.by_type.iter_mut() {
            v.block.truncate_if_needed_at(skip)?;
        }
        let fi_batch = first_tx_index.collect_range_at(skip, end);
        let txid_len = indexer.vecs.transactions.txid.len();
        let total_txout_len = indexer.vecs.outputs.output_type.len();
        // Sequential cursors over the per-output type vec and the per-tx
        // first-output index; advanced monotonically by the closure below.
        let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
        let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
        compute_type_counts(
            &mut self.by_type,
            &fi_batch,
            txid_len,
            // skip_first_tx = false: unlike the input side, every tx —
            // including each block's first — has countable outputs.
            false,
            starting_indexes.height,
            exit,
            |tx_pos| {
                // Output range [fo, next_fo) of the tx at `tx_pos`; the
                // last tx is bounded by the total output count.
                let fo = fo_cursor.get(tx_pos).data()?.to_usize();
                let next_fo = if tx_pos + 1 < txid_len {
                    fo_cursor.get(tx_pos + 1).data()?.to_usize()
                } else {
                    total_txout_len
                };
                // Bitset of distinct output types seen among this tx's
                // outputs (bit = type discriminant).
                let mut seen: u16 = 0;
                otype_cursor.advance(fo - otype_cursor.position());
                for _ in fo..next_fo {
                    seen |= 1u16 << (otype_cursor.next().unwrap() as u8);
                }
                Ok(seen)
            },
        )
    }
}

View File

@@ -1,39 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
    /// Imports both per-address-type vec groups, deriving each vec's
    /// on-disk name from the address-type name.
    pub(crate) fn forced_import(
        db: &Database,
        version: Version,
        indexes: &indexes::Vecs,
        cached_starts: &Windows<&WindowStartVec>,
    ) -> Result<Self> {
        let by_type = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_out");
            PerBlockCumulativeRolling::forced_import(db, &vec_name, version, indexes, cached_starts)
        })?;
        let percent = ByAddrType::new_with_name(|name| {
            let vec_name = format!("tx_count_with_{name}_out_rel_to_all");
            PercentCumulativeRolling::forced_import(db, &vec_name, version, indexes)
        })?;
        Ok(Self { by_type, percent })
    }
}

View File

@@ -1,12 +0,0 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
/// Per-address-type counts of transactions with at least one output of
/// that type (`by_type`), plus their share of all transactions in basis
/// points (`percent`).
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
    pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
    pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,91 +0,0 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
/// Shared scan loop for the input- and output-side per-type tx counts.
///
/// Walks every block described by `fi_batch` (first-tx-index per block,
/// in order; `txid_len` bounds the last block's tx range), calls
/// `scan_tx` once per transaction to obtain a `u16` bitset of the output
/// types that transaction touches, and pushes one per-block count per
/// address type into `by_type`. When `skip_first_tx` is true the first
/// tx of each block is not scanned (used by the input-side caller —
/// presumably to exclude the coinbase; confirm with that caller).
pub(super) fn compute_type_counts(
    by_type: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
    fi_batch: &[TxIndex],
    txid_len: usize,
    skip_first_tx: bool,
    starting_height: Height,
    exit: &Exit,
    mut scan_tx: impl FnMut(usize) -> Result<u16>,
) -> Result<()> {
    for (j, first_tx) in fi_batch.iter().enumerate() {
        let fi = first_tx.to_usize();
        // Tx range of this block: [fi, next_fi); the last block is
        // bounded by the total tx count.
        let next_fi = fi_batch
            .get(j + 1)
            .map(|v| v.to_usize())
            .unwrap_or(txid_len);
        let start_tx = if skip_first_tx { fi + 1 } else { fi };
        // One counter per bit position in the `scan_tx` bitset; indexed
        // by the OutputType discriminant (`otype as usize` below).
        let mut counts = [0u64; 12];
        for tx_pos in start_tx..next_fi {
            let seen = scan_tx(tx_pos)?;
            // Count each set bit once by clearing the lowest set bit
            // per iteration.
            let mut bits = seen;
            while bits != 0 {
                let idx = bits.trailing_zeros() as usize;
                counts[idx] += 1;
                bits &= bits - 1;
            }
        }
        for otype in OutputType::ADDR_TYPES {
            by_type
                .get_mut_unwrap(otype)
                .block
                .push(StoredU64::from(counts[otype as usize]));
        }
        // Flush all per-type vecs together (holding the exit lock) as
        // soon as any one of them hits its batch limit, so they advance
        // on disk in lock-step.
        if by_type.p2pkh.block.batch_limit_reached() {
            let _lock = exit.lock();
            for (_, v) in by_type.iter_mut() {
                v.block.write()?;
            }
        }
    }
    // Final flush of whatever remains after the last block.
    {
        let _lock = exit.lock();
        for (_, v) in by_type.iter_mut() {
            v.block.write()?;
        }
    }
    // Derive the cumulative/rolling series from the freshly pushed
    // per-block counts.
    for (_, v) in by_type.iter_mut() {
        v.compute_rest(starting_height, exit)?;
    }
    Ok(())
}
/// For every address type, computes that type's share of the total tx
/// count as basis points (per `RatioU64Bp16`), at both cumulative and
/// rolling-window granularity.
pub(super) fn compute_type_percents(
    by_type: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
    percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
    count_total: &PerBlockFull<StoredU64>,
    starting_height: Height,
    exit: &Exit,
) -> Result<()> {
    for otype in OutputType::ADDR_TYPES {
        let source = by_type.get_unwrap(otype);
        // Numerators come from this type's cumulative and per-window
        // sums; denominators from the matching totals.
        percent
            .get_mut_unwrap(otype)
            .compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
                starting_height,
                &source.cumulative.height,
                &count_total.cumulative.height,
                source.sum.as_array().map(|w| &w.height),
                count_total.rolling.sum.as_array().map(|w| &w.height),
                exit,
            )?;
    }
    Ok(())
}

View File

@@ -5,9 +5,7 @@ use vecdb::Exit;
use super::Vecs;
use crate::transactions::{count, fees};
use crate::{indexes, inputs, outputs, prices};
const WINDOW_SECS: [f64; 4] = [86400.0, 7.0 * 86400.0, 30.0 * 86400.0, 365.0 * 86400.0];
use crate::{indexes, internal::Windows, prices};
impl Vecs {
#[allow(clippy::too_many_arguments)]
@@ -18,8 +16,6 @@ impl Vecs {
prices: &prices::Vecs,
count_vecs: &count::Vecs,
fees_vecs: &fees::Vecs,
inputs_count: &inputs::CountVecs,
outputs_count: &outputs::CountVecs,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
@@ -37,32 +33,14 @@ impl Vecs {
let h = starting_indexes.height;
let tx_sums = count_vecs.total.rolling.sum.0.as_array();
let input_sums = inputs_count.rolling.sum.0.as_array();
let output_sums = outputs_count.total.rolling.sum.0.as_array();
for (i, &secs) in WINDOW_SECS.iter().enumerate() {
self.tx_per_sec.as_mut_array()[i].height.compute_transform(
let tx_per_sec = self.tx_per_sec.as_mut_array();
for (i, &secs) in Windows::<()>::SECS.iter().enumerate() {
tx_per_sec[i].height.compute_transform(
h,
&tx_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
self.inputs_per_sec.as_mut_array()[i]
.height
.compute_transform(
h,
&input_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
self.outputs_per_sec.as_mut_array()[i]
.height
.compute_transform(
h,
&output_sums[i].height,
|(h, sum, ..)| (h, StoredF32::from(*sum as f64 / secs)),
exit,
)?;
}
Ok(())

View File

@@ -27,12 +27,6 @@ impl Vecs {
tx_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("tx_per_sec_{suffix}"), v, indexes)
})?,
outputs_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("outputs_per_sec_{suffix}"), v, indexes)
})?,
inputs_per_sec: Windows::try_from_fn(|suffix| {
PerBlock::forced_import(db, &format!("inputs_per_sec_{suffix}"), v, indexes)
})?,
})
}
}

View File

@@ -8,6 +8,4 @@ use crate::internal::{AmountPerBlockCumulativeRolling, PerBlock, Windows};
pub struct Vecs<M: StorageMode = Rw> {
pub transfer_volume: AmountPerBlockCumulativeRolling<M>,
pub tx_per_sec: Windows<PerBlock<StoredF32, M>>,
pub outputs_per_sec: Windows<PerBlock<StoredF32, M>>,
pub inputs_per_sec: Windows<PerBlock<StoredF32, M>>,
}

View File

@@ -301,6 +301,10 @@ impl Indexer {
drop(readers);
export(stores, vecs, height)?;
readers = Readers::new(vecs);
if height == Height::new(500_000) {
break;
}
}
*self.tip_blockhash.write() = block.block_hash().into();

View File

@@ -209,8 +209,8 @@ impl Query {
.collect_range_at(begin, end);
let utxo_set_sizes = computer
.outputs
.count
.unspent
.count
.height
.collect_range_at(begin, end);
let input_volumes = computer

View File

@@ -2,12 +2,15 @@
//! versus `Reader::after_canonical` (1 reader + N parser threads + canonical
//! hash filter).
//!
//! Two phases:
//! Three phases:
//!
//! 1. **Tail scenarios** — pick an anchor `N` blocks below the chain tip
//! and run each implementation `REPEATS` times. Exercises the tail
//! (≤10) and forward (>10) code paths under realistic catchup ranges.
//! 2. **Full reindex** — anchor=`None` (genesis to tip), one run per
//! 2. **Partial reindex** — anchor=`None` but stop after
//! `PARTIAL_LIMIT` blocks. Exercises the early-chain blk files
//! where blocks are small and dense-parsing isn't the bottleneck.
//! 3. **Full reindex** — anchor=`None` (genesis to tip), one run per
//! config. Exercises every blk file once and shows steady-state
//! throughput on the densest possible workload.
//!
@@ -25,6 +28,7 @@ use brk_types::{BlockHash, Height, ReadBlock};
const SCENARIOS: &[usize] = &[5, 10, 100, 1_000, 10_000];
const REPEATS: usize = 3;
const PARTIAL_LIMIT: usize = 400_000;
fn main() -> Result<()> {
let bitcoin_dir = Client::default_bitcoin_path();
@@ -68,6 +72,26 @@ fn main() -> Result<()> {
println!();
}
println!();
println!("Partial reindex (genesis → {PARTIAL_LIMIT} blocks), one run per config:");
println!(
"{:>10} {:>16} {:>12} {:>10}",
"blocks", "impl", "elapsed", "blk/s"
);
println!("{}", "-".repeat(54));
let after_partial = run_bounded(PARTIAL_LIMIT, || reader.after(None))?;
print_full_row("after", &after_partial);
let p1_partial = run_bounded(PARTIAL_LIMIT, || reader.after_canonical(None))?;
print_full_row("canonical[p=1]", &p1_partial);
sanity_check_full(&after_partial, &p1_partial);
let p4_partial = run_bounded(PARTIAL_LIMIT, || reader.after_canonical_with(None, 4))?;
print_full_row("canonical[p=4]", &p4_partial);
sanity_check_full(&after_partial, &p4_partial);
let p16_partial = run_bounded(PARTIAL_LIMIT, || reader.after_canonical_with(None, 16))?;
print_full_row("canonical[p=16]", &p16_partial);
sanity_check_full(&after_partial, &p16_partial);
println!();
println!("Full reindex (genesis → tip), one run per config:");
println!(
@@ -176,6 +200,27 @@ where
})
}
/// Runs the pipeline from genesis but stops consuming once `limit`
/// blocks have been received. Dropping the receiver afterwards closes
/// the channel, which unblocks and unwinds the reader's spawned worker.
fn run_bounded<F>(limit: usize, mut f: F) -> Result<FullRun>
where
    F: FnMut() -> Result<Receiver<ReadBlock>>,
{
    let started = Instant::now();
    let receiver = f()?;
    let mut count = 0;
    while count < limit {
        // Stop early if the sender side is done before `limit` is hit.
        let Ok(block) = receiver.recv() else { break };
        std::hint::black_box(block.height());
        count += 1;
    }
    let elapsed = started.elapsed();
    // Close the channel before the next bench config spins up another
    // reader worker.
    drop(receiver);
    Ok(FullRun { elapsed, count })
}
fn print_full_row(label: &str, run: &FullRun) {
let blk_per_s = if run.elapsed.is_zero() {
0.0

View File

@@ -1,32 +1,44 @@
//! Canonical-hash pipeline for `Reader::after`.
//!
//! Three pieces, each with one job:
//! Bitcoin Core stores accepted blocks in append-only `blk*.dat` files
//! under the data dir, XOR-encoded with a per-datadir key. A "blk
//! file" contains every block the node ever accepted — including
//! blocks that were later orphaned by a reorg — in acceptance order,
//! not height order. This module turns "give me every block after
//! `hash` up to the tip" into an ordered `ReadBlock` stream drawn from
//! those files while skipping orphans.
//!
//! * **`CanonicalRange::walk`** is the only place bitcoind is consulted
//! about the main chain. It batch-fetches every canonical hash in the
//! target window once, up front, via `getblockhash` JSON-RPC batching.
//! * **`parse_canonical_block`** is a pure function of raw blk bytes.
//! It XOR-decodes only the 80-byte header, looks the hash up in the
//! pre-fetched `CanonicalRange`, and short-circuits orphans before
//! touching the (expensive) transaction body. No RPC, no `confirmations`
//! filter, no chain logic.
//! * **`pipeline_forward` / `pipeline_tail`** wire the scan loop to a
//! parser pool. The forward pipeline runs 1 reader + N parser threads
//! (default `N = 1`, configurable via `after_canonical_with`); the
//! tail pipeline (≤10 blocks) stays inline on a single thread because
//! channel/lock overhead would dominate.
//! How it works:
//!
//! Coexists with the original `read`/`read_rev`/`after` so the two can be
//! A/B-tested from the indexer.
//! 1. [`CanonicalRange::walk`] asks bitcoind once, up front, for the
//! canonical block hash at every height in the target window. This
//! is one batched JSON-RPC request — no per-block RPC overhead.
//! 2. The reader walks blk files in order and scans each one for the
//! block magic prefix. For every block found,
//! [`peek_canonical_offset`] hashes the 80-byte header and looks
//! the hash up in the canonical map. Orphans short-circuit here,
//! before any bytes are cloned.
//! 3. Canonical hits are cloned into [`ScannedBlock`]s and shipped
//! over a channel to a small pool of parser workers, which run
//! [`parse_canonical_body`] to fully decode the block.
//! 4. Parsers serialise their output through [`ReorderState`] so that
//! the consumer receives blocks in canonical-height order even if
//! the blk files emitted them out of order.
//!
//! Ranges of at most `TAIL_THRESHOLD` blocks take a specialised
//! [`pipeline_tail`] path that reverse-scans the newest blk files in
//! 5 MB chunks — cheaper than walking forward from genesis for a
//! handful of tip blocks.
//!
//! Public entry points: [`ReaderInner::after_canonical`] and
//! [`ReaderInner::after_canonical_with`]. Coexists with the original
//! `read` / `read_rev` / `after` so the two can be A/B-tested.
use std::{
fs::{self, File},
io::{Cursor, Read, Seek, SeekFrom},
ops::ControlFlow,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
sync::atomic::{AtomicBool, Ordering},
thread,
};
@@ -39,10 +51,7 @@ use parking_lot::Mutex;
use rustc_hash::FxHashMap;
use tracing::{error, warn};
use crate::{
BlkIndexToBlkPath, ReaderInner, XORBytes, XORIndex,
scan::{ScanResult, scan_bytes},
};
use crate::{BlkIndexToBlkPath, ReaderInner, XORBytes, XORIndex, scan::scan_bytes};
const BOUND_CAP: usize = 50;
const TAIL_CHUNK: usize = 5 * 1024 * 1024;
@@ -58,64 +67,50 @@ const DEFAULT_PARSER_THREADS: usize = 1;
// CanonicalRange — the only RPC-aware piece in this file.
// ─────────────────────────────────────────────────────────────────────────────
/// Forward-ordered canonical hashes for `start..=end`, resolved once up front.
///
/// `hashes[i]` is the canonical block hash at height `start + i`.
/// `by_prefix` maps the 8-byte `BlockHashPrefix` of every canonical hash to
/// its offset — same prefix-keyed scheme brk already uses in `stores`.
/// Lookups verify the full hash via `hashes[offset]`, neutralising the
/// (astronomically small) prefix collision risk at zero extra cost.
/// Every canonical block hash in a contiguous height window, resolved
/// from bitcoind once up front. `hashes[i]` is the canonical hash at
/// height `start + i`. Lookups by hash go through `by_prefix` (8-byte
/// key, same scheme as `brk_store`) and verify the full hash on hit.
pub struct CanonicalRange {
pub start: Height,
pub end: Height,
hashes: Vec<BlockHash>,
by_prefix: FxHashMap<BlockHashPrefix, u32>,
}
impl CanonicalRange {
/// Resolves canonical hashes for every height strictly after `anchor`
/// up to `tip` inclusive. If `anchor` is `None`, starts at genesis.
///
/// Uses `get_block_hash(h)` which is a deterministic height → canonical
/// hash lookup — no race window against in-progress reorgs.
/// up to `tip` inclusive. `anchor = None` starts at genesis.
pub fn walk(client: &Client, anchor: Option<BlockHash>, tip: Height) -> Result<Self> {
let start = match anchor {
Some(hash) => {
let info = client.get_block_header_info(&hash)?;
Height::from(info.height + 1)
}
Some(hash) => Height::from(client.get_block_header_info(&hash)?.height + 1),
None => Height::ZERO,
};
if start > tip {
return Ok(Self::empty(start));
return Ok(Self {
start,
hashes: Vec::new(),
by_prefix: FxHashMap::default(),
});
}
let len = (*tip - *start + 1) as usize;
let hashes = client.get_block_hashes_range(*start, *tip)?;
let mut by_prefix = FxHashMap::with_capacity_and_hasher(len, Default::default());
for (offset, hash) in hashes.iter().enumerate() {
by_prefix.insert(BlockHashPrefix::from(hash), offset as u32);
}
let mut by_prefix =
FxHashMap::with_capacity_and_hasher(hashes.len(), Default::default());
by_prefix.extend(
hashes
.iter()
.enumerate()
.map(|(i, h)| (BlockHashPrefix::from(h), i as u32)),
);
Ok(Self {
start,
end: tip,
hashes,
by_prefix,
})
}
fn empty(start: Height) -> Self {
Self {
start,
end: start,
hashes: Vec::new(),
by_prefix: FxHashMap::default(),
}
}
pub fn len(&self) -> usize {
self.hashes.len()
}
@@ -124,10 +119,10 @@ impl CanonicalRange {
self.hashes.is_empty()
}
/// Returns the offset-from-start of `hash` iff it matches a canonical
/// block in this range. A prefix hit is verified against the stored
/// full hash to rule out the (vanishing) chance of prefix collisions
/// from unrelated orphans in blk files.
/// Returns the offset-from-`start` of `hash` iff it matches the
/// canonical chain in this range. A prefix hit is verified against
/// the full hash so prefix collisions from orphaned blocks are
/// rejected.
#[inline]
fn offset_of(&self, hash: &BlockHash) -> Option<u32> {
let offset = *self.by_prefix.get(&BlockHashPrefix::from(hash))?;
@@ -136,64 +131,63 @@ impl CanonicalRange {
}
// ─────────────────────────────────────────────────────────────────────────────
// Pure block parser — no client, no confirmations, no Ok(None) on RPC errors.
// Block parsing — cheap header peek first, full body parse only on a hit.
// ─────────────────────────────────────────────────────────────────────────────
const HEADER_LEN: usize = 80;
/// XOR-decode just the 80-byte header, compute the block hash, look it
/// up in `canonical`, and only proceed to parse the body and transactions
/// when the block is on the canonical chain. Returning early before the
/// body decode is what lets a single parser thread keep up with the
/// 4-thread `read()` pool on sparse ranges.
///
/// Returns `Ok(None)` for orphans / out-of-range blocks. Deterministic —
/// never touches RPC.
fn parse_canonical_block(
mut bytes: Vec<u8>,
metadata: BlkMetadata,
mut xor_i: XORIndex,
/// Returns the canonical offset of `bytes` if its header hashes to a
/// known canonical block, otherwise `None`. Does not allocate and does
/// not mutate `bytes`: the header is copied onto a stack buffer and
/// XOR-decoded there so an orphan short-circuits cleanly and a
/// canonical hit can still be cloned out intact.
fn peek_canonical_offset(
bytes: &[u8],
mut xor_state: XORIndex,
xor_bytes: XORBytes,
canonical: &CanonicalRange,
) -> Result<Option<(u32, ReadBlock)>> {
) -> Option<u32> {
if bytes.len() < HEADER_LEN {
return Err(Error::Internal("Block bytes shorter than header"));
return None;
}
let mut header_buf = [0u8; HEADER_LEN];
header_buf.copy_from_slice(&bytes[..HEADER_LEN]);
xor_state.bytes(&mut header_buf, xor_bytes);
let header = Header::consensus_decode(&mut &header_buf[..]).ok()?;
canonical.offset_of(&BlockHash::from(header.block_hash()))
}
// Decode just the header and look the hash up before paying for the
// body. `xor_i` advances `HEADER_LEN` here so it stays in lock-step
// with the decoded prefix.
xor_i.bytes(&mut bytes[..HEADER_LEN], xor_bytes);
let header = Header::consensus_decode(&mut &bytes[..HEADER_LEN])?;
let bitcoin_hash = header.block_hash();
let Some(offset) = canonical.offset_of(&BlockHash::from(bitcoin_hash)) else {
return Ok(None);
};
// Canonical: XOR-decode the body and parse transactions.
xor_i.bytes(&mut bytes[HEADER_LEN..], xor_bytes);
/// Full XOR-decode + parse for a block that has already been confirmed
/// canonical by `peek_canonical_offset`. Takes owned `bytes` so it can
/// mutate them in place and hand them to the resulting `ReadBlock`.
fn parse_canonical_body(
mut bytes: Vec<u8>,
metadata: BlkMetadata,
mut xor_state: XORIndex,
xor_bytes: XORBytes,
height: Height,
) -> Result<ReadBlock> {
xor_state.bytes(&mut bytes, xor_bytes);
let mut cursor = Cursor::new(bytes);
cursor.set_position(HEADER_LEN as u64);
let header = Header::consensus_decode(&mut cursor)?;
let bitcoin_hash = header.block_hash();
let tx_count = VarInt::consensus_decode(&mut cursor)?.0 as usize;
let mut txdata = Vec::with_capacity(tx_count);
let mut tx_metadata = Vec::with_capacity(tx_count);
let mut tx_offsets = Vec::with_capacity(tx_count);
for _ in 0..tx_count {
let off = cursor.position() as u32;
tx_offsets.push(off);
let position = metadata.position() + off;
let tx_start = cursor.position() as u32;
tx_offsets.push(tx_start);
let tx = Transaction::consensus_decode(&mut cursor)?;
let tx_len = cursor.position() as u32 - tx_start;
txdata.push(tx);
let len = cursor.position() as u32 - off;
tx_metadata.push(BlkMetadata::new(position, len));
tx_metadata.push(BlkMetadata::new(metadata.position() + tx_start, tx_len));
}
let raw_bytes = cursor.into_inner();
let height = Height::from(*canonical.start + offset);
let mut block = Block::from((height, bitcoin_hash, bitcoin::Block { header, txdata }));
block.set_raw_data(raw_bytes, tx_offsets);
Ok(Some((offset, ReadBlock::from((block, metadata, tx_metadata)))))
Ok(ReadBlock::from((block, metadata, tx_metadata)))
}
// ─────────────────────────────────────────────────────────────────────────────
@@ -201,21 +195,19 @@ fn parse_canonical_block(
// ─────────────────────────────────────────────────────────────────────────────
impl ReaderInner {
/// Stream every canonical block strictly after `hash` (or from
/// genesis if `None`) up to the current chain tip, in canonical
/// order, via the canonical-hash pipeline.
///
/// Uses the default parser-thread count (`1`); see
/// `after_canonical_with` to override.
/// Streams every canonical block strictly after `hash` (or from
/// genesis when `None`) up to the current chain tip, in canonical
/// order. Uses the default parser-thread count; see
/// [`after_canonical_with`](Self::after_canonical_with) to override.
pub fn after_canonical(&self, hash: Option<BlockHash>) -> Result<Receiver<ReadBlock>> {
self.after_canonical_with(hash, DEFAULT_PARSER_THREADS)
}
/// Same as `after_canonical` but with a configurable number of parser
/// threads. `parser_threads = 1` is the minimal-thread default
/// (1 reader + 1 parser, uncontended mutex hot path). Higher values
/// trade extra cores for throughput on dense ranges where the parser
/// is the bottleneck.
/// Like [`after_canonical`](Self::after_canonical) but with a
/// configurable number of parser threads. `parser_threads = 1` is
/// the minimal-thread default (1 reader + 1 parser, uncontended
/// mutex). Higher values trade extra cores for throughput on dense
/// ranges where the parser is the bottleneck.
pub fn after_canonical_with(
&self,
hash: Option<BlockHash>,
@@ -223,43 +215,42 @@ impl ReaderInner {
) -> Result<Receiver<ReadBlock>> {
let parser_threads = parser_threads.max(1);
let tip = self.client.get_last_height()?;
let canonical = Arc::new(CanonicalRange::walk(&self.client, hash, tip)?);
let canonical = CanonicalRange::walk(&self.client, hash, tip)?;
if canonical.is_empty() {
return Ok(bounded(0).1);
}
// Refresh the blk path cache once, on the caller's thread, so the
// worker thread below has a stable view.
let paths = BlkIndexToBlkPath::scan(&self.blocks_dir);
*self.blk_index_to_blk_path.write() = paths.clone();
let (send, recv) = bounded(BOUND_CAP);
let xor_bytes = self.xor_bytes;
if canonical.len() <= TAIL_THRESHOLD {
thread::spawn(move || {
if let Err(e) = pipeline_tail(&paths, xor_bytes, &canonical, &send) {
error!("after_canonical tail pipeline failed: {e}");
}
});
let use_tail = canonical.len() <= TAIL_THRESHOLD;
let first_blk_index = if use_tail {
0
} else {
let first_blk_index = self
.find_start_blk_index(Some(canonical.start), &paths, xor_bytes)
.unwrap_or_default();
self.find_start_blk_index(Some(canonical.start), &paths, xor_bytes)
.unwrap_or_default()
};
thread::spawn(move || {
if let Err(e) = pipeline_forward(
let result = if use_tail {
pipeline_tail(&paths, xor_bytes, &canonical, &send)
} else {
pipeline_forward(
&paths,
first_blk_index,
xor_bytes,
canonical,
&canonical,
&send,
parser_threads,
) {
error!("after_canonical forward pipeline failed: {e}");
)
};
if let Err(e) = result {
error!("after_canonical pipeline failed: {e}");
}
});
}
Ok(recv)
}
@@ -269,36 +260,40 @@ impl ReaderInner {
// Forward pipeline — 1 reader + N parsers + shared in-order emission.
// ─────────────────────────────────────────────────────────────────────────────
/// Item shipped from the reader thread to the parser pool: raw block
/// bytes, blk-file metadata, and the XOR state at the byte the bytes
/// start at.
type ScannedItem = (BlkMetadata, Vec<u8>, XORIndex);
/// A raw block the reader has already confirmed is on the canonical
/// chain, shipped to the parser pool for full decoding.
struct ScannedBlock {
metadata: BlkMetadata,
bytes: Vec<u8>,
xor_state: XORIndex,
canonical_offset: u32,
}
/// Shared in-order emission buffer used by N parser threads. The mutex
/// is uncontended at `parser_threads = 1` (still acquired, never blocks).
/// In-order emission buffer shared between the parser threads. Access
/// is serialised through a `parking_lot::Mutex`; at `parser_threads = 1`
/// the lock is always uncontended.
struct ReorderState {
next_offset: u32,
target_len: u32,
/// Ahead-of-line matches keyed by canonical offset; drained
/// contiguously each time `next_offset` advances.
/// contiguously each time `next_offset` advances. Bounded in
/// practice by parser-thread scheduling jitter — see module doc.
pending: FxHashMap<u32, ReadBlock>,
send_to_consumer: Sender<ReadBlock>,
}
impl ReorderState {
fn new(send_to_consumer: Sender<ReadBlock>, target_len: u32) -> Self {
fn new(send_to_consumer: Sender<ReadBlock>) -> Self {
Self {
next_offset: 0,
target_len,
pending: FxHashMap::default(),
send_to_consumer,
}
}
/// Insert a parsed canonical block. Returns `false` once the pipeline
/// is done — either the consumer dropped the receiver, every canonical
/// block has been emitted, or a parser somehow produced a duplicate
/// offset — so the caller should stop processing and exit.
/// Accepts a parsed canonical block; emits it and drains any
/// contiguous pending matches. Returns `false` iff the consumer
/// dropped the receiver — a pure liveness signal. Completion is
/// checked by the caller via `next_offset`.
fn try_emit(&mut self, offset: u32, block: ReadBlock) -> bool {
use std::cmp::Ordering::*;
match offset.cmp(&self.next_offset) {
@@ -307,80 +302,83 @@ impl ReorderState {
return false;
}
self.next_offset += 1;
while let Some(b) = self.pending.remove(&self.next_offset) {
if self.send_to_consumer.send(b).is_err() {
while let Some(next) = self.pending.remove(&self.next_offset) {
if self.send_to_consumer.send(next).is_err() {
return false;
}
self.next_offset += 1;
}
self.next_offset < self.target_len
true
}
Greater => {
self.pending.insert(offset, block);
true
}
// Each canonical hash appears at exactly one offset, and
// each block is parsed once, so a parser should never
// produce an offset below `next_offset`. Treat as done.
Less => false,
// Unreachable in practice: each canonical hash appears at
// exactly one offset and each block is parsed once.
Less => true,
}
}
}
/// Two-stage pipeline:
/// Forward pipeline: the reader (this thread) scans blk files and
/// ships canonical hits to a scoped parser pool via `parser_send`;
/// parsers decode bodies and serialise emission through `reorder`.
/// Scoped threads let every parser borrow `canonical`, `reorder`, and
/// `done` directly — no `Arc` required.
///
/// 1. **Reader (this thread)** — walks blk files from `first_blk_index`,
/// `fs::read`s each one, runs `scan_bytes` to locate every block, and
/// ships `ScannedItem`s over an mpmc channel to the parser pool.
/// 2. **Parser pool** — `parser_threads` workers draining the same
/// channel. Each worker runs `parse_canonical_block` (header first,
/// body only on canonical match) and acquires the shared `ReorderState`
/// mutex to insert into the in-order emission buffer.
///
/// Canonical blocks can arrive out of order across blk files (bitcoind
/// doesn't write in strict chain order during initial sync, headers-first
/// body fetch, or reindex), so the reorder buffer is required even with
/// a single parser thread.
/// A reorder buffer is required even at `parser_threads = 1` because
/// canonical blocks can arrive out of order across blk files (bitcoind
/// doesn't write in strict chain order during initial sync, headers-
/// first body fetch, or reindex).
fn pipeline_forward(
paths: &BlkIndexToBlkPath,
first_blk_index: u16,
xor_bytes: XORBytes,
canonical: Arc<CanonicalRange>,
canonical: &CanonicalRange,
send: &Sender<ReadBlock>,
parser_threads: usize,
) -> Result<()> {
let (parser_send, parser_recv) = bounded::<ScannedItem>(BOUND_CAP);
let reorder = Arc::new(Mutex::new(ReorderState::new(
send.clone(),
canonical.len() as u32,
)));
// Set when the pipeline is finished (consumer dropped or all canonical
// blocks emitted) so parsers can short-circuit instead of burning CPU
// on doomed work while the reader drains the queue.
let done = Arc::new(AtomicBool::new(false));
let (parser_send, parser_recv) = bounded::<ScannedBlock>(BOUND_CAP);
let reorder = Mutex::new(ReorderState::new(send.clone()));
let target_canonical_count = canonical.len() as u32;
let done = AtomicBool::new(false);
let parsers = spawn_parser_pool(
parser_threads,
&parser_recv,
thread::scope(|scope| -> Result<()> {
for _ in 0..parser_threads {
let parser_recv = parser_recv.clone();
scope.spawn(|| {
parser_loop(
parser_recv,
&reorder,
&done,
&canonical,
canonical,
xor_bytes,
);
drop(parser_recv); // parsers own clones; this would otherwise keep the channel open
let read_result = read_and_dispatch(paths, first_blk_index, xor_bytes, &parser_send, &done);
drop(parser_send); // signal end-of-input to parsers
for parser in parsers {
parser
.join()
.map_err(|_| Error::Internal("parser thread panicked"))??;
target_canonical_count,
)
});
}
read_result?;
// Every parser owns its own clone; ours would otherwise keep
// the channel "alive" and leak a dangling receiver.
drop(parser_recv);
let state = reorder.lock();
if (state.next_offset as usize) < canonical.len() && !done.load(Ordering::Relaxed) {
let read_result = read_and_dispatch(
paths,
first_blk_index,
xor_bytes,
canonical,
&parser_send,
&done,
);
// Signal end-of-input to the parsers so they exit their `for`
// loops and the scope can join them.
drop(parser_send);
read_result
})?;
let pipeline_cancelled = done.load(Ordering::Relaxed);
let emitted = reorder.lock().next_offset as usize;
if !pipeline_cancelled && emitted < canonical.len() {
return Err(Error::Internal(
"after_canonical forward pipeline: blk files missing canonical blocks",
));
@@ -388,58 +386,50 @@ fn pipeline_forward(
Ok(())
}
/// Spawn `n` parser threads that drain `parser_recv`, parse each scanned
/// item via `parse_canonical_block`, and emit canonical matches to
/// `reorder`. Parsers exit when the channel closes or `done` is set.
fn spawn_parser_pool(
n: usize,
parser_recv: &Receiver<ScannedItem>,
reorder: &Arc<Mutex<ReorderState>>,
done: &Arc<AtomicBool>,
canonical: &Arc<CanonicalRange>,
/// Full-body parse + in-order emit loop run by every scoped parser
/// worker in `pipeline_forward`. Drains `parser_recv` to exhaustion.
fn parser_loop(
parser_recv: Receiver<ScannedBlock>,
reorder: &Mutex<ReorderState>,
done: &AtomicBool,
canonical: &CanonicalRange,
xor_bytes: XORBytes,
) -> Vec<thread::JoinHandle<Result<()>>> {
(0..n)
.map(|_| {
let parser_recv = parser_recv.clone();
let reorder = reorder.clone();
let done = done.clone();
let canonical = canonical.clone();
thread::spawn(move || -> Result<()> {
for (metadata, bytes, xor_i) in parser_recv {
target_canonical_count: u32,
) {
for ScannedBlock { metadata, bytes, xor_state, canonical_offset } in parser_recv {
if done.load(Ordering::Relaxed) {
continue; // drain quietly
continue;
}
let (offset, block) = match parse_canonical_block(
bytes, metadata, xor_i, xor_bytes, &canonical,
) {
Ok(Some(item)) => item,
Ok(None) => continue, // orphan / out of range
let height = Height::from(*canonical.start + canonical_offset);
let block = match parse_canonical_body(bytes, metadata, xor_state, xor_bytes, height) {
Ok(block) => block,
Err(e) => {
warn!("parse_canonical_block failed: {e}");
warn!("parse_canonical_body failed: {e}");
continue;
}
};
if !reorder.lock().try_emit(offset, block) {
let pipeline_finished = {
let mut state = reorder.lock();
!state.try_emit(canonical_offset, block)
|| state.next_offset >= target_canonical_count
};
if pipeline_finished {
done.store(true, Ordering::Relaxed);
}
}
Ok(())
})
})
.collect()
}
/// Walk blk files from `first_blk_index`, scan each one, and ship every
/// raw block found to the parser pool. Stops early if `done` flips or
/// the parser channel closes.
/// Walk blk files from `first_blk_index`, scan each one, and ship
/// canonical blocks to the parser pool. Non-canonical blocks are
/// rejected via `peek_canonical_offset` *before* being cloned — the
/// cheap filter is what lets a sparse catchup avoid allocating for the
/// ~99% of blocks outside the window.
fn read_and_dispatch(
paths: &BlkIndexToBlkPath,
first_blk_index: u16,
xor_bytes: XORBytes,
parser_send: &Sender<ScannedItem>,
canonical: &CanonicalRange,
parser_send: &Sender<ScannedBlock>,
done: &AtomicBool,
) -> Result<()> {
for (&blk_index, blk_path) in paths.range(first_blk_index..) {
@@ -457,10 +447,22 @@ fn read_and_dispatch(
blk_index,
0,
xor_bytes,
|metadata, block_bytes, xor_i| {
if done.load(Ordering::Relaxed)
|| parser_send.send((metadata, block_bytes, xor_i)).is_err()
{
|metadata, block_bytes, xor_state| {
if done.load(Ordering::Relaxed) {
return ControlFlow::Break(());
}
let Some(canonical_offset) =
peek_canonical_offset(block_bytes, xor_state, xor_bytes, canonical)
else {
return ControlFlow::Continue(());
};
let scanned = ScannedBlock {
metadata,
bytes: block_bytes.to_vec(),
xor_state,
canonical_offset,
};
if parser_send.send(scanned).is_err() {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
@@ -476,65 +478,72 @@ fn read_and_dispatch(
}
// ─────────────────────────────────────────────────────────────────────────────
// Tail pipeline — reverse 5MB chunks of the last blk files until we've
// collected every canonical hash, then emit forward.
// Tail pipeline — reverse-scan the newest blk files in 5 MB chunks until
// every canonical hash has been matched, then emit them forward.
// ─────────────────────────────────────────────────────────────────────────────
fn pipeline_tail(
paths: &BlkIndexToBlkPath,
xor_bytes: XORBytes,
canonical: &Arc<CanonicalRange>,
canonical: &CanonicalRange,
send: &Sender<ReadBlock>,
) -> Result<()> {
// Collected matches, keyed by canonical offset. Tail ranges are ≤10 so
// a Vec<Option<_>> is the simplest representation.
let mut collected: Vec<Option<ReadBlock>> = (0..canonical.len()).map(|_| None).collect();
let mut slots: Vec<Option<ReadBlock>> = (0..canonical.len()).map(|_| None).collect();
let mut remaining = canonical.len();
// Carries the bytes before a chunk's first magic into the next
// (earlier) chunk so blocks straddling the boundary survive.
let mut spillover: Vec<u8> = Vec::new();
'files: for (&blk_index, path) in paths.iter().rev() {
let file_len = fs::metadata(path).map(|m| m.len() as usize).unwrap_or(0);
let mut file = File::open(path).map_err(|_| Error::Internal("Failed to open blk file"))?;
let file_len = file.metadata().map(|m| m.len() as usize).unwrap_or(0);
if file_len == 0 {
continue;
}
let Ok(mut file) = File::open(path) else {
return Err(Error::Internal("Failed to open blk file"));
};
let mut read_end = file_len;
let mut head: Vec<u8> = Vec::new();
spillover.clear();
while read_end > 0 && remaining > 0 {
let read_start = read_end.saturating_sub(TAIL_CHUNK);
let chunk_len = read_end - read_start;
read_end = read_start;
if file.seek(SeekFrom::Start(read_start as u64)).is_err() {
return Err(Error::Internal("Failed to seek blk file"));
}
let mut buf = vec![0u8; chunk_len + head.len()];
if file.read_exact(&mut buf[..chunk_len]).is_err() {
return Err(Error::Internal("Failed to read blk chunk"));
}
buf[chunk_len..].copy_from_slice(&head);
head.clear();
file.seek(SeekFrom::Start(read_start as u64))
.map_err(|_| Error::Internal("Failed to seek blk file"))?;
let mut buf = vec![0u8; chunk_len + spillover.len()];
file.read_exact(&mut buf[..chunk_len])
.map_err(|_| Error::Internal("Failed to read blk chunk"))?;
buf[chunk_len..].copy_from_slice(&spillover);
spillover.clear();
let result: ScanResult = scan_bytes(
let result = scan_bytes(
&mut buf,
blk_index,
read_start,
xor_bytes,
|metadata, block_bytes, xor_i| {
match parse_canonical_block(block_bytes, metadata, xor_i, xor_bytes, canonical)
{
Ok(Some((offset, block))) => {
let slot = &mut collected[offset as usize];
if slot.is_none() {
*slot = Some(block);
|metadata, block_bytes, xor_state| {
let Some(offset) =
peek_canonical_offset(block_bytes, xor_state, xor_bytes, canonical)
else {
return ControlFlow::Continue(());
};
if slots[offset as usize].is_some() {
return ControlFlow::Continue(());
}
let height = Height::from(*canonical.start + offset);
match parse_canonical_body(
block_bytes.to_vec(),
metadata,
xor_state,
xor_bytes,
height,
) {
Ok(block) => {
slots[offset as usize] = Some(block);
remaining -= 1;
}
}
Ok(None) => {} // orphan / out of range
Err(e) => warn!("parse_canonical_block failed in tail pipeline: {e}"),
Err(e) => warn!("parse_canonical_body failed in tail pipeline: {e}"),
}
if remaining == 0 {
ControlFlow::Break(())
@@ -547,9 +556,8 @@ fn pipeline_tail(
if remaining == 0 {
break 'files;
}
if read_start > 0 {
head = buf[..result.first_magic.unwrap_or(buf.len())].to_vec();
spillover.extend_from_slice(&buf[..result.first_magic.unwrap_or(buf.len())]);
}
}
}
@@ -560,9 +568,7 @@ fn pipeline_tail(
));
}
// `remaining == 0` above guarantees every slot is `Some`; `flatten`
// is just the natural way to write the emit loop.
for block in collected.into_iter().flatten() {
for block in slots.into_iter().flatten() {
if send.send(block).is_err() {
return Ok(());
}

View File

@@ -221,7 +221,8 @@ impl ReaderInner {
0,
xor_bytes,
|metadata, block_bytes, xor_i| {
if send_bytes.send((metadata, block_bytes, xor_i)).is_err() {
// Send owned bytes to the rayon parser pool.
if send_bytes.send((metadata, block_bytes.to_vec(), xor_i)).is_err() {
return ControlFlow::Break(());
}
ControlFlow::Continue(())
@@ -371,8 +372,10 @@ impl ReaderInner {
read_start,
xor_bytes,
|metadata, bytes, xor_i| {
// `decode_block` needs owned bytes — it XOR-
// decodes in place before parsing.
if let Ok(Some(block)) = decode_block(
bytes, metadata, &client, xor_i, xor_bytes, start, end, 0, 0,
bytes.to_vec(), metadata, &client, xor_i, xor_bytes, start, end, 0, 0,
) {
blocks.push(block);
}

View File

@@ -23,14 +23,17 @@ pub struct ScanResult {
pub interrupted: bool,
}
/// Scans `buf` for blocks. `file_offset` is the absolute position of `buf[0]` in the file.
/// Calls `on_block` for each complete block found.
/// Scans `buf` for blocks. `file_offset` is the absolute position of
/// `buf[0]` in the file. Calls `on_block` for each complete block found,
/// passing the block's raw bytes as a mutable borrow of the buffer — the
/// caller decides whether to clone them (e.g. to ship owned data to a
/// parser thread) or process them in place (e.g. cheap header peek).
pub fn scan_bytes(
buf: &mut [u8],
blk_index: u16,
file_offset: usize,
xor_bytes: XORBytes,
mut on_block: impl FnMut(BlkMetadata, Vec<u8>, XORIndex) -> ControlFlow<()>,
mut on_block: impl FnMut(BlkMetadata, &mut [u8], XORIndex) -> ControlFlow<()>,
) -> ScanResult {
let mut xor_i = XORIndex::default();
xor_i.add_assign(file_offset);
@@ -56,7 +59,7 @@ pub fn scan_bytes(
}
let position = BlkPosition::new(blk_index, (file_offset + i) as u32);
let metadata = BlkMetadata::new(position, len as u32);
if on_block(metadata, buf[i..i + len].to_vec(), xor_i).is_break() {
if on_block(metadata, &mut buf[i..i + len], xor_i).is_break() {
return ScanResult {
first_magic,
interrupted: true,

File diff suppressed because it is too large Load Diff

View File

@@ -2590,6 +2590,25 @@ class _0sdM0M1M1sdM2M2sdM3sdP0P1P1sdP2P2sdP3sdSdZscorePattern:
"""Pattern struct for repeated tree structure."""
pass
class AllEmptyOpP2aP2msP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshUnknownPattern:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'bis'))
self.empty: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_empty_outputs_output'))
self.op_return: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_op_return_output'))
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2a_output'))
self.p2ms: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2ms_output'))
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2pk33_output'))
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2pk65_output'))
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2pkh_output'))
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2sh_output'))
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2tr_output'))
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2wpkh_output'))
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_p2wsh_output'))
self.unknown: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _m(acc, 'with_unknown_outputs_output'))
class _10y1m1w1y2y3m3y4y5y6m6y8yPattern2:
"""Pattern struct for repeated tree structure."""
@@ -2613,18 +2632,22 @@ class _10y1m1w1y2y3m3y4y5y6m6y8yPattern3:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self._10y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '10y'))
self._1m: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1m'))
self._1w: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1w'))
self._1y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1y'))
self._2y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '2y'))
self._3m: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '3m'))
self._3y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '3y'))
self._4y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '4y'))
self._5y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '5y'))
self._6m: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '6m'))
self._6y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '6y'))
self._8y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '8y'))
self._10y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '10y'))
self._1m: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '1m'))
self._1w: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '1w'))
self._1y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '1y'))
self._2y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '2y'))
self._3m: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '3m'))
self._3y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '3y'))
self._4y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '4y'))
self._5y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '5y'))
self._6m: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '6m'))
self._6y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '6y'))
self._8y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '8y'))
class AllEmptyP2aP2msP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshUnknownPattern:
"""Pattern struct for repeated tree structure."""
pass
class CapGrossInvestorLossMvrvNetPeakPriceProfitSellSoprPattern:
"""Pattern struct for repeated tree structure."""
@@ -2664,6 +2687,21 @@ class AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern(Generic[T]
self.pct90: _1m1w1y24hPattern[T] = _1m1w1y24hPattern(client, _m(acc, 'pct90'))
self.sum: _1m1w1y24hPattern[T] = _1m1w1y24hPattern(client, _m(acc, 'sum'))
class AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern5:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, acc)
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2a', acc))
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2pk33', acc))
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2pk65', acc))
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2pkh', acc))
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2sh', acc))
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2tr', acc))
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2wpkh', acc))
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, _p('p2wsh', acc))
class AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3:
"""Pattern struct for repeated tree structure."""
@@ -2713,14 +2751,6 @@ class BpsCentsPercentilesRatioSatsSmaStdUsdPattern:
"""Pattern struct for repeated tree structure."""
pass
class P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2:
"""Pattern struct for repeated tree structure."""
pass
class P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3:
"""Pattern struct for repeated tree structure."""
pass
class Pct0Pct1Pct2Pct5Pct95Pct98Pct99Pattern:
"""Pattern struct for repeated tree structure."""
@@ -2753,10 +2783,10 @@ class _1m1w1y24hBpsPercentRatioPattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self._1m: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, '1m'))
self._1w: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, '1w'))
self._1y: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, '1y'))
self._24h: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, '24h'))
self._1m: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, '1m'))
self._1w: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, '1w'))
self._1y: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, '1y'))
self._24h: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, '24h'))
self.bps: SeriesPattern1[BasisPoints16] = SeriesPattern1(client, _m(acc, 'bps'))
self.percent: SeriesPattern1[StoredF32] = SeriesPattern1(client, acc)
self.ratio: SeriesPattern1[StoredF32] = SeriesPattern1(client, _m(acc, 'ratio'))
@@ -2785,7 +2815,7 @@ class InMaxMinPerSupplyPattern:
self.min: CentsSatsUsdPattern = CentsSatsUsdPattern(client, _m(acc, 'cost_basis_min'))
self.per_coin: Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern = Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern(client, _m(acc, 'cost_basis_per_coin'))
self.per_dollar: Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern = Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern(client, _m(acc, 'cost_basis_per_dollar'))
self.supply_density: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'supply_density'))
self.supply_density: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'supply_density'))
class MaxMedianMinPct10Pct25Pct75Pct90Pattern2:
"""Pattern struct for repeated tree structure."""
@@ -2843,8 +2873,8 @@ class AverageBlockCumulativeInSumPattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.average: _1m1w1y24hPattern3 = _1m1w1y24hPattern3(client, _m(acc, 'average'))
self.block: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, acc)
self.cumulative: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'cumulative'))
self.block: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.cumulative: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'cumulative'))
self.in_loss: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, _m(acc, 'in_loss'))
self.in_profit: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, _m(acc, 'in_profit'))
self.sum: _1m1w1y24hPattern4 = _1m1w1y24hPattern4(client, _m(acc, 'sum'))
@@ -2869,8 +2899,8 @@ class BtcCentsSatsToUsdPattern3:
self.btc: SeriesPattern1[Bitcoin] = SeriesPattern1(client, acc)
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, _m(acc, 'cents'))
self.sats: SeriesPattern1[Sats] = SeriesPattern1(client, _m(acc, 'sats'))
self.to_circulating: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_circulating'))
self.to_own: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_own'))
self.to_circulating: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_circulating'))
self.to_own: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_own'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'usd'))
class CentsNegativeToUsdPattern2:
@@ -2880,8 +2910,8 @@ class CentsNegativeToUsdPattern2:
"""Create pattern node with accumulated series name."""
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, _m(acc, 'cents'))
self.negative: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'neg'))
self.to_mcap: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_mcap'))
self.to_own_gross_pnl: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_own_gross_pnl'))
self.to_mcap: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_mcap'))
self.to_own_gross_pnl: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_own_gross_pnl'))
self.to_own_mcap: BpsPercentRatioPattern4 = BpsPercentRatioPattern4(client, _m(acc, 'to_own_mcap'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, acc)
@@ -2891,11 +2921,11 @@ class DeltaHalfInToTotalPattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.delta: AbsoluteRatePattern = AbsoluteRatePattern(client, _m(acc, 'delta'))
self.half: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'half'))
self.half: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'half'))
self.in_loss: BtcCentsSatsToUsdPattern = BtcCentsSatsToUsdPattern(client, _m(acc, 'in_loss'))
self.in_profit: BtcCentsSatsToUsdPattern = BtcCentsSatsToUsdPattern(client, _m(acc, 'in_profit'))
self.to_circulating: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_circulating'))
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.to_circulating: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_circulating'))
self.total: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, acc)
class DeltaHalfInToTotalPattern2:
"""Pattern struct for repeated tree structure."""
@@ -2903,22 +2933,11 @@ class DeltaHalfInToTotalPattern2:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.delta: AbsoluteRatePattern = AbsoluteRatePattern(client, _m(acc, 'delta'))
self.half: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'half'))
self.half: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'half'))
self.in_loss: BtcCentsSatsToUsdPattern3 = BtcCentsSatsToUsdPattern3(client, _m(acc, 'in_loss'))
self.in_profit: BtcCentsSatsToUsdPattern3 = BtcCentsSatsToUsdPattern3(client, _m(acc, 'in_profit'))
self.to_circulating: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_circulating'))
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
class _1m1w1y24hCumulativePattern:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self._1m: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1m'))
self._1w: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1w'))
self._1y: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1y'))
self._24h: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_24h'))
self.cumulative: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'cumulative'))
self.to_circulating: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_circulating'))
self.total: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, acc)
class _1m1w1y24hBlockPattern:
"""Pattern struct for repeated tree structure."""
@@ -3005,7 +3024,7 @@ class BtcCentsSatsToUsdPattern:
self.btc: SeriesPattern1[Bitcoin] = SeriesPattern1(client, acc)
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, _m(acc, 'cents'))
self.sats: SeriesPattern1[Sats] = SeriesPattern1(client, _m(acc, 'sats'))
self.to_circulating: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_circulating'))
self.to_circulating: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_circulating'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'usd'))
class BtcCentsSatsToUsdPattern2:
@@ -3016,7 +3035,7 @@ class BtcCentsSatsToUsdPattern2:
self.btc: SeriesPattern1[Bitcoin] = SeriesPattern1(client, acc)
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, _m(acc, 'cents'))
self.sats: SeriesPattern1[Sats] = SeriesPattern1(client, _m(acc, 'sats'))
self.to_own: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_own'))
self.to_own: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_own'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'usd'))
class CapLossMvrvPriceProfitPattern:
@@ -3036,9 +3055,9 @@ class CentsToUsdPattern4:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, _m(acc, 'cents'))
self.to_mcap: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_mcap'))
self.to_own_gross_pnl: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_own_gross_pnl'))
self.to_own_mcap: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_own_mcap'))
self.to_mcap: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_mcap'))
self.to_own_gross_pnl: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_own_gross_pnl'))
self.to_own_mcap: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'to_own_mcap'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, acc)
class DeltaHalfInTotalPattern2:
@@ -3047,10 +3066,10 @@ class DeltaHalfInTotalPattern2:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.delta: AbsoluteRatePattern = AbsoluteRatePattern(client, _m(acc, 'delta'))
self.half: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'half'))
self.in_loss: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'in_loss'))
self.in_profit: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'in_profit'))
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.half: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'half'))
self.in_loss: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'in_loss'))
self.in_profit: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'in_profit'))
self.total: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, acc)
class EmaHistogramLineSignalPattern:
"""Pattern struct for repeated tree structure."""
@@ -3087,7 +3106,7 @@ class _1m1w1y24hPattern7:
self._1y: BpsPercentRatioPattern4 = BpsPercentRatioPattern4(client, _m(acc, '1y'))
self._24h: BpsPercentRatioPattern4 = BpsPercentRatioPattern4(client, _m(acc, '24h'))
class _1m1w1y24hPattern3:
class _1m1w1y24hPattern4:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
@@ -3097,15 +3116,15 @@ class _1m1w1y24hPattern3:
self._1y: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '1y'))
self._24h: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, '24h'))
class _1m1w1y24hPattern4:
class _1m1w1y24hPattern3:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self._1m: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1m'))
self._1w: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1w'))
self._1y: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '1y'))
self._24h: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, '24h'))
self._1m: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, _m(acc, '1m'))
self._1w: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, _m(acc, '1w'))
self._1y: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, _m(acc, '1y'))
self._24h: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, _m(acc, '24h'))
class _1m1w1y2wPattern:
"""Pattern struct for repeated tree structure."""
@@ -3157,8 +3176,8 @@ class AverageBlockCumulativeSumPattern3:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.average: _1m1w1y24hPattern3 = _1m1w1y24hPattern3(client, _m(acc, 'average'))
self.block: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, acc)
self.cumulative: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'cumulative'))
self.block: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.cumulative: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'cumulative'))
self.sum: _1m1w1y24hPattern4 = _1m1w1y24hPattern4(client, _m(acc, 'sum'))
class BlockCumulativeNegativeSumPattern:
@@ -3191,7 +3210,7 @@ class BothReactivatedReceivingSendingPattern:
self.receiving: _1m1w1y24hBlockPattern = _1m1w1y24hBlockPattern(client, _m(acc, 'receiving'))
self.sending: _1m1w1y24hBlockPattern = _1m1w1y24hBlockPattern(client, _m(acc, 'sending'))
class BtcCentsSatsUsdPattern3:
class BtcCentsSatsUsdPattern:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
@@ -3201,7 +3220,7 @@ class BtcCentsSatsUsdPattern3:
self.sats: SeriesPattern1[Sats] = SeriesPattern1(client, _m(acc, 'sats'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'usd'))
class BtcCentsSatsUsdPattern:
class BtcCentsSatsUsdPattern2:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
@@ -3211,7 +3230,7 @@ class BtcCentsSatsUsdPattern:
self.sats: SeriesPattern1[StoredF32] = SeriesPattern1(client, _m(acc, 'sats'))
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, _m(acc, 'usd'))
class BtcCentsSatsUsdPattern2:
class BtcCentsSatsUsdPattern3:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
@@ -3318,7 +3337,7 @@ class BlocksDominanceRewardsPattern:
self.dominance: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, _m(acc, 'dominance'))
self.rewards: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, _m(acc, 'rewards'))
class BpsPercentRatioPattern3:
class BpsPercentRatioPattern2:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
@@ -3440,9 +3459,9 @@ class RsiStochPattern:
def __init__(self, client: BrkClientBase, acc: str, disc: str):
"""Create pattern node with accumulated series name."""
self.rsi: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, disc))
self.stoch_rsi_d: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, f'stoch_d_{disc}'))
self.stoch_rsi_k: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, f'stoch_k_{disc}'))
self.rsi: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, disc))
self.stoch_rsi_d: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, f'stoch_d_{disc}'))
self.stoch_rsi_k: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, f'stoch_k_{disc}'))
class SpendingSpentUnspentPattern:
"""Pattern struct for repeated tree structure."""
@@ -3484,7 +3503,7 @@ class AllSthPattern2:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.all: BtcCentsDeltaSatsUsdPattern = BtcCentsDeltaSatsUsdPattern(client, _m(acc, 'supply'))
self.sth: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'sth_supply'))
self.sth: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'sth_supply'))
class AllSthPattern:
"""Pattern struct for repeated tree structure."""
@@ -3515,8 +3534,8 @@ class BlockCumulativePattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.block: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, acc)
self.cumulative: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, _m(acc, 'cumulative'))
self.block: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.cumulative: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, _m(acc, 'cumulative'))
class BlocksDominancePattern:
"""Pattern struct for repeated tree structure."""
@@ -3524,7 +3543,7 @@ class BlocksDominancePattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.blocks_mined: AverageBlockCumulativeSumPattern2 = AverageBlockCumulativeSumPattern2(client, _m(acc, 'blocks_mined'))
self.dominance: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'dominance'))
self.dominance: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, _m(acc, 'dominance'))
class BpsRatioPattern2:
"""Pattern struct for repeated tree structure."""
@@ -3542,10 +3561,6 @@ class BpsRatioPattern:
self.bps: SeriesPattern1[BasisPointsSigned32] = SeriesPattern1(client, _m(acc, 'bps'))
self.ratio: SeriesPattern1[StoredF32] = SeriesPattern1(client, acc)
class ByPercentPattern:
"""Pattern struct for repeated tree structure."""
pass
class CentsUsdPattern3:
"""Pattern struct for repeated tree structure."""
@@ -3592,7 +3607,15 @@ class DeltaTotalPattern:
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.delta: AbsoluteRatePattern = AbsoluteRatePattern(client, _m(acc, 'delta'))
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
self.total: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, acc)
class FundedTotalPattern:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self.funded: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, acc)
self.total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, _p('total', acc))
class InPattern:
"""Pattern struct for repeated tree structure."""
@@ -3861,74 +3884,6 @@ class SeriesTree_Transactions_Volume:
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.transfer_volume: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, 'transfer_volume_bis')
self.tx_per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'tx_per_sec')
self.outputs_per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'outputs_per_sec')
self.inputs_per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'inputs_per_sec')
class SeriesTree_Transactions_InputTypes_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk65_in')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk33_in')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pkh_in')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2sh_in')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wpkh_in')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wsh_in')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2tr_in')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2a_in')
class SeriesTree_Transactions_InputTypes_Percent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk65_in_rel_to_all')
self.p2pk33: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk33_in_rel_to_all')
self.p2pkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pkh_in_rel_to_all')
self.p2sh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2sh_in_rel_to_all')
self.p2wpkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wpkh_in_rel_to_all')
self.p2wsh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wsh_in_rel_to_all')
self.p2tr: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2tr_in_rel_to_all')
self.p2a: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2a_in_rel_to_all')
class SeriesTree_Transactions_InputTypes:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.by_type: SeriesTree_Transactions_InputTypes_ByType = SeriesTree_Transactions_InputTypes_ByType(client)
self.percent: SeriesTree_Transactions_InputTypes_Percent = SeriesTree_Transactions_InputTypes_Percent(client)
class SeriesTree_Transactions_OutputTypes_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk65_out')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk33_out')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pkh_out')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2sh_out')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wpkh_out')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wsh_out')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2tr_out')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2a_out')
class SeriesTree_Transactions_OutputTypes_Percent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk65_out_rel_to_all')
self.p2pk33: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk33_out_rel_to_all')
self.p2pkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pkh_out_rel_to_all')
self.p2sh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2sh_out_rel_to_all')
self.p2wpkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wpkh_out_rel_to_all')
self.p2wsh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wsh_out_rel_to_all')
self.p2tr: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2tr_out_rel_to_all')
self.p2a: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2a_out_rel_to_all')
class SeriesTree_Transactions_OutputTypes:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.by_type: SeriesTree_Transactions_OutputTypes_ByType = SeriesTree_Transactions_OutputTypes_ByType(client)
self.percent: SeriesTree_Transactions_OutputTypes_Percent = SeriesTree_Transactions_OutputTypes_Percent(client)
class SeriesTree_Transactions:
"""Series tree node."""
@@ -3940,8 +3895,6 @@ class SeriesTree_Transactions:
self.fees: SeriesTree_Transactions_Fees = SeriesTree_Transactions_Fees(client)
self.versions: SeriesTree_Transactions_Versions = SeriesTree_Transactions_Versions(client)
self.volume: SeriesTree_Transactions_Volume = SeriesTree_Transactions_Volume(client)
self.input_types: SeriesTree_Transactions_InputTypes = SeriesTree_Transactions_InputTypes(client)
self.output_types: SeriesTree_Transactions_OutputTypes = SeriesTree_Transactions_OutputTypes(client)
class SeriesTree_Inputs_Raw:
"""Series tree node."""
@@ -3960,6 +3913,64 @@ class SeriesTree_Inputs_Spent:
self.txout_index: SeriesPattern20[TxOutIndex] = SeriesPattern20(client, 'txout_index')
self.value: SeriesPattern20[Sats] = SeriesPattern20(client, 'value')
class SeriesTree_Inputs_ByType_InputCount:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'input_count_bis')
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk65_prevout_count')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk33_prevout_count')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pkh_prevout_count')
self.p2ms: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2ms_prevout_count')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2sh_prevout_count')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wpkh_prevout_count')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wsh_prevout_count')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2tr_prevout_count')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2a_prevout_count')
self.unknown: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'unknown_outputs_prevout_count')
self.empty: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'empty_outputs_prevout_count')
class SeriesTree_Inputs_ByType_TxCount:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'non_coinbase_tx_count')
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk65_prevout')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk33_prevout')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pkh_prevout')
self.p2ms: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2ms_prevout')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2sh_prevout')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wpkh_prevout')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wsh_prevout')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2tr_prevout')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2a_prevout')
self.unknown: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_unknown_outputs_prevout')
self.empty: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_empty_outputs_prevout')
class SeriesTree_Inputs_ByType_TxPercent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pk65_prevout')
self.p2pk33: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pk33_prevout')
self.p2pkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pkh_prevout')
self.p2ms: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2ms_prevout')
self.p2sh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2sh_prevout')
self.p2wpkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2wpkh_prevout')
self.p2wsh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2wsh_prevout')
self.p2tr: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2tr_prevout')
self.p2a: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2a_prevout')
self.unknown: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_unknown_outputs_prevout')
self.empty: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_empty_outputs_prevout')
class SeriesTree_Inputs_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.input_count: SeriesTree_Inputs_ByType_InputCount = SeriesTree_Inputs_ByType_InputCount(client)
self.tx_count: SeriesTree_Inputs_ByType_TxCount = SeriesTree_Inputs_ByType_TxCount(client)
self.tx_percent: SeriesTree_Inputs_ByType_TxPercent = SeriesTree_Inputs_ByType_TxPercent(client)
class SeriesTree_Inputs:
"""Series tree node."""
@@ -3967,6 +3978,8 @@ class SeriesTree_Inputs:
self.raw: SeriesTree_Inputs_Raw = SeriesTree_Inputs_Raw(client)
self.spent: SeriesTree_Inputs_Spent = SeriesTree_Inputs_Spent(client)
self.count: CumulativeRollingSumPattern = CumulativeRollingSumPattern(client, 'input_count')
self.per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'inputs_per_sec')
self.by_type: SeriesTree_Inputs_ByType = SeriesTree_Inputs_ByType(client)
class SeriesTree_Outputs_Raw:
"""Series tree node."""
@@ -3989,7 +4002,61 @@ class SeriesTree_Outputs_Count:
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.total: CumulativeRollingSumPattern = CumulativeRollingSumPattern(client, 'output_count')
self.unspent: SeriesPattern1[StoredU64] = SeriesPattern1(client, 'utxo_count_bis')
class SeriesTree_Outputs_Unspent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.count: SeriesPattern1[StoredU64] = SeriesPattern1(client, 'utxo_count_bis')
class SeriesTree_Outputs_ByType_OutputCount:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'output_count_bis')
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk65_output_count')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk33_output_count')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pkh_output_count')
self.p2ms: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2ms_output_count')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2sh_output_count')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wpkh_output_count')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wsh_output_count')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2tr_output_count')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2a_output_count')
self.unknown: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'unknown_outputs_output_count')
self.empty: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'empty_outputs_output_count')
self.op_return: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'op_return_output_count')
class SeriesTree_Outputs_ByType_TxPercent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pk65_output')
self.p2pk33: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pk33_output')
self.p2pkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2pkh_output')
self.p2ms: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2ms_output')
self.p2sh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2sh_output')
self.p2wpkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2wpkh_output')
self.p2wsh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2wsh_output')
self.p2tr: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2tr_output')
self.p2a: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_p2a_output')
self.unknown: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_unknown_outputs_output')
self.empty: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_empty_outputs_output')
self.op_return: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'tx_percent_with_op_return_output')
class SeriesTree_Outputs_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.output_count: SeriesTree_Outputs_ByType_OutputCount = SeriesTree_Outputs_ByType_OutputCount(client)
self.tx_count: AllEmptyOpP2aP2msP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshUnknownPattern = AllEmptyOpP2aP2msP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshUnknownPattern(client, 'tx_count')
self.tx_percent: SeriesTree_Outputs_ByType_TxPercent = SeriesTree_Outputs_ByType_TxPercent(client)
class SeriesTree_Outputs_Value:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.op_return: BlockCumulativePattern = BlockCumulativePattern(client, 'op_return_value')
class SeriesTree_Outputs:
"""Series tree node."""
@@ -3998,6 +4065,10 @@ class SeriesTree_Outputs:
self.raw: SeriesTree_Outputs_Raw = SeriesTree_Outputs_Raw(client)
self.spent: SeriesTree_Outputs_Spent = SeriesTree_Outputs_Spent(client)
self.count: SeriesTree_Outputs_Count = SeriesTree_Outputs_Count(client)
self.per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'outputs_per_sec')
self.unspent: SeriesTree_Outputs_Unspent = SeriesTree_Outputs_Unspent(client)
self.by_type: SeriesTree_Outputs_ByType = SeriesTree_Outputs_ByType(client)
self.value: SeriesTree_Outputs_Value = SeriesTree_Outputs_Value(client)
class SeriesTree_Addrs_Raw_P2pk65:
"""Series tree node."""
@@ -4104,26 +4175,54 @@ class SeriesTree_Addrs_Activity:
self.p2tr: BothReactivatedReceivingSendingPattern = BothReactivatedReceivingSendingPattern(client, 'p2tr_addr_activity')
self.p2a: BothReactivatedReceivingSendingPattern = BothReactivatedReceivingSendingPattern(client, 'p2a_addr_activity')
class SeriesTree_Addrs_New:
class SeriesTree_Addrs_Reused_Uses_ReusedAddrUsePercent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.all: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'new_addr_count')
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk65_new_addr_count')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk33_new_addr_count')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pkh_new_addr_count')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2sh_new_addr_count')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wpkh_new_addr_count')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wsh_new_addr_count')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2tr_new_addr_count')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2a_new_addr_count')
self.all: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'reused_addr_use_percent')
self.p2pk65: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2pk65_reused_addr_use_percent')
self.p2pk33: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2pk33_reused_addr_use_percent')
self.p2pkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2pkh_reused_addr_use_percent')
self.p2sh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2sh_reused_addr_use_percent')
self.p2wpkh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2wpkh_reused_addr_use_percent')
self.p2wsh: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2wsh_reused_addr_use_percent')
self.p2tr: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2tr_reused_addr_use_percent')
self.p2a: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'p2a_reused_addr_use_percent')
class SeriesTree_Addrs_Reused_Uses:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.reused_addr_use_count: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern5 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern5(client, 'reused_addr_use_count')
self.reused_addr_use_percent: SeriesTree_Addrs_Reused_Uses_ReusedAddrUsePercent = SeriesTree_Addrs_Reused_Uses_ReusedAddrUsePercent(client)
class SeriesTree_Addrs_Reused:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.funded: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'reused_addr_count')
self.total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'total_reused_addr_count')
self.count: FundedTotalPattern = FundedTotalPattern(client, 'reused_addr_count')
self.uses: SeriesTree_Addrs_Reused_Uses = SeriesTree_Addrs_Reused_Uses(client)
class SeriesTree_Addrs_Exposed_Supply:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.all: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'exposed_addr_supply')
self.p2pk65: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2pk65_exposed_addr_supply')
self.p2pk33: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2pk33_exposed_addr_supply')
self.p2pkh: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2pkh_exposed_addr_supply')
self.p2sh: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2sh_exposed_addr_supply')
self.p2wpkh: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2wpkh_exposed_addr_supply')
self.p2wsh: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2wsh_exposed_addr_supply')
self.p2tr: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2tr_exposed_addr_supply')
self.p2a: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'p2a_exposed_addr_supply')
class SeriesTree_Addrs_Exposed:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.count: FundedTotalPattern = FundedTotalPattern(client, 'exposed_addr_count')
self.supply: SeriesTree_Addrs_Exposed_Supply = SeriesTree_Addrs_Exposed_Supply(client)
class SeriesTree_Addrs_Delta:
"""Series tree node."""
@@ -4150,8 +4249,9 @@ class SeriesTree_Addrs:
self.empty: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'empty_addr_count')
self.activity: SeriesTree_Addrs_Activity = SeriesTree_Addrs_Activity(client)
self.total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'total_addr_count')
self.new: SeriesTree_Addrs_New = SeriesTree_Addrs_New(client)
self.new: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern5 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern5(client, 'new_addr_count')
self.reused: SeriesTree_Addrs_Reused = SeriesTree_Addrs_Reused(client)
self.exposed: SeriesTree_Addrs_Exposed = SeriesTree_Addrs_Exposed(client)
self.delta: SeriesTree_Addrs_Delta = SeriesTree_Addrs_Delta(client)
class SeriesTree_Scripts_Raw_Empty:
@@ -4191,43 +4291,18 @@ class SeriesTree_Scripts_Raw:
self.p2ms: SeriesTree_Scripts_Raw_P2ms = SeriesTree_Scripts_Raw_P2ms(client)
self.unknown: SeriesTree_Scripts_Raw_Unknown = SeriesTree_Scripts_Raw_Unknown(client)
class SeriesTree_Scripts_Count:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2a_count')
self.p2ms: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2ms_count')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk33_count')
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pk65_count')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2pkh_count')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2sh_count')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2tr_count')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wpkh_count')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2wsh_count')
self.op_return: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'op_return_count')
self.empty_output: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'empty_output_count')
self.unknown_output: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'unknown_output_count')
class SeriesTree_Scripts_Value:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.op_return: BlockCumulativePattern = BlockCumulativePattern(client, 'op_return_value')
class SeriesTree_Scripts:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.raw: SeriesTree_Scripts_Raw = SeriesTree_Scripts_Raw(client)
self.count: SeriesTree_Scripts_Count = SeriesTree_Scripts_Count(client)
self.value: SeriesTree_Scripts_Value = SeriesTree_Scripts_Value(client)
class SeriesTree_Mining_Rewards_Subsidy:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.block: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, 'subsidy')
self.cumulative: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'subsidy_cumulative')
self.block: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'subsidy')
self.cumulative: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'subsidy_cumulative')
self.sum: _1m1w1y24hPattern4 = _1m1w1y24hPattern4(client, 'subsidy_sum')
self.average: _1m1w1y24hPattern3 = _1m1w1y24hPattern3(client, 'subsidy_average')
self.dominance: _1m1w1y24hBpsPercentRatioPattern = _1m1w1y24hBpsPercentRatioPattern(client, 'subsidy_dominance')
@@ -4245,8 +4320,8 @@ class SeriesTree_Mining_Rewards_Fees:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.block: BtcCentsSatsUsdPattern2 = BtcCentsSatsUsdPattern2(client, 'fees')
self.cumulative: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'fees_cumulative')
self.block: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'fees')
self.cumulative: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'fees_cumulative')
self.sum: _1m1w1y24hPattern4 = _1m1w1y24hPattern4(client, 'fees_sum')
self.average: _1m1w1y24hPattern3 = _1m1w1y24hPattern3(client, 'fees_average')
self.min: _1m1w1y24hPattern4 = _1m1w1y24hPattern4(client, 'fees_min')
@@ -4317,8 +4392,8 @@ class SeriesTree_Cointime_Supply:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.vaulted: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'vaulted_supply')
self.active: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'active_supply')
self.vaulted: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'vaulted_supply')
self.active: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'active_supply')
class SeriesTree_Cointime_Value:
"""Series tree node."""
@@ -4697,7 +4772,7 @@ class SeriesTree_Indicators:
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.puell_multiple: BpsRatioPattern2 = BpsRatioPattern2(client, 'puell_multiple')
self.nvt: BpsRatioPattern2 = BpsRatioPattern2(client, 'nvt')
self.gini: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'gini')
self.gini: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'gini')
self.rhodl_ratio: BpsRatioPattern2 = BpsRatioPattern2(client, 'rhodl_ratio')
self.thermo_cap_multiple: BpsRatioPattern2 = BpsRatioPattern2(client, 'thermo_cap_multiple')
self.coindays_destroyed_supply_adjusted: SeriesPattern1[StoredF32] = SeriesPattern1(client, 'coindays_destroyed_supply_adjusted')
@@ -4739,18 +4814,18 @@ class SeriesTree_Investing_Class_DcaStack:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.from_2015: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2015')
self.from_2016: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2016')
self.from_2017: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2017')
self.from_2018: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2018')
self.from_2019: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2019')
self.from_2020: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2020')
self.from_2021: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2021')
self.from_2022: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2022')
self.from_2023: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2023')
self.from_2024: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2024')
self.from_2025: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2025')
self.from_2026: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'dca_stack_from_2026')
self.from_2015: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2015')
self.from_2016: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2016')
self.from_2017: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2017')
self.from_2018: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2018')
self.from_2019: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2019')
self.from_2020: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2020')
self.from_2021: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2021')
self.from_2022: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2022')
self.from_2023: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2023')
self.from_2024: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2024')
self.from_2025: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2025')
self.from_2026: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'dca_stack_from_2026')
class SeriesTree_Investing_Class_DcaCostBasis:
"""Series tree node."""
@@ -4902,7 +4977,7 @@ class SeriesTree_Market_Range:
self.max: _1m1w1y2wPattern = _1m1w1y2wPattern(client, 'price_max')
self.true_range: SeriesPattern1[StoredF32] = SeriesPattern1(client, 'price_true_range')
self.true_range_sum_2w: SeriesPattern1[StoredF32] = SeriesPattern1(client, 'price_true_range_sum_2w')
self.choppiness_index_2w: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'price_choppiness_index_2w')
self.choppiness_index_2w: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'price_choppiness_index_2w')
class SeriesTree_Market_MovingAverage_Sma_200d:
"""Series tree node."""
@@ -5269,21 +5344,21 @@ class SeriesTree_Supply:
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.state: SeriesPattern18[SupplyState] = SeriesPattern18(client, 'supply_state')
self.circulating: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'circulating_supply')
self.circulating: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'circulating_supply')
self.burned: BlockCumulativePattern = BlockCumulativePattern(client, 'unspendable_supply')
self.inflation_rate: BpsPercentRatioPattern = BpsPercentRatioPattern(client, 'inflation_rate')
self.velocity: SeriesTree_Supply_Velocity = SeriesTree_Supply_Velocity(client)
self.market_cap: CentsDeltaUsdPattern = CentsDeltaUsdPattern(client, 'market_cap')
self.market_minus_realized_cap_growth_rate: _1m1w1y24hPattern[BasisPointsSigned32] = _1m1w1y24hPattern(client, 'market_minus_realized_cap_growth_rate')
self.hodled_or_lost: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'hodled_or_lost_supply')
self.hodled_or_lost: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'hodled_or_lost_supply')
class SeriesTree_Cohorts_Utxo_All_Supply:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'supply')
self.total: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'supply')
self.delta: AbsoluteRatePattern = AbsoluteRatePattern(client, 'supply_delta')
self.half: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, 'supply_half')
self.half: BtcCentsSatsUsdPattern = BtcCentsSatsUsdPattern(client, 'supply_half')
self.in_profit: BtcCentsSatsToUsdPattern2 = BtcCentsSatsToUsdPattern2(client, 'supply_in_profit')
self.in_loss: BtcCentsSatsToUsdPattern2 = BtcCentsSatsToUsdPattern2(client, 'supply_in_loss')
@@ -5449,7 +5524,7 @@ class SeriesTree_Cohorts_Utxo_All_CostBasis:
self.max: CentsSatsUsdPattern = CentsSatsUsdPattern(client, 'cost_basis_max')
self.per_coin: Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern = Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern(client, 'cost_basis_per_coin')
self.per_dollar: Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern = Pct05Pct10Pct15Pct20Pct25Pct30Pct35Pct40Pct45Pct50Pct55Pct60Pct65Pct70Pct75Pct80Pct85Pct90Pct95Pattern(client, 'cost_basis_per_dollar')
self.supply_density: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'supply_density')
self.supply_density: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'supply_density')
class SeriesTree_Cohorts_Utxo_All_Unrealized_Profit:
"""Series tree node."""
@@ -5457,8 +5532,8 @@ class SeriesTree_Cohorts_Utxo_All_Unrealized_Profit:
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, 'unrealized_profit')
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, 'unrealized_profit_cents')
self.to_mcap: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'unrealized_profit_to_mcap')
self.to_own_gross_pnl: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'unrealized_profit_to_own_gross_pnl')
self.to_mcap: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'unrealized_profit_to_mcap')
self.to_own_gross_pnl: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'unrealized_profit_to_own_gross_pnl')
class SeriesTree_Cohorts_Utxo_All_Unrealized_Loss:
"""Series tree node."""
@@ -5467,8 +5542,8 @@ class SeriesTree_Cohorts_Utxo_All_Unrealized_Loss:
self.usd: SeriesPattern1[Dollars] = SeriesPattern1(client, 'unrealized_loss')
self.cents: SeriesPattern1[Cents] = SeriesPattern1(client, 'unrealized_loss_cents')
self.negative: SeriesPattern1[Dollars] = SeriesPattern1(client, 'unrealized_loss_neg')
self.to_mcap: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'unrealized_loss_to_mcap')
self.to_own_gross_pnl: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, 'unrealized_loss_to_own_gross_pnl')
self.to_mcap: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'unrealized_loss_to_mcap')
self.to_own_gross_pnl: BpsPercentRatioPattern2 = BpsPercentRatioPattern2(client, 'unrealized_loss_to_own_gross_pnl')
class SeriesTree_Cohorts_Utxo_All_Unrealized_NetPnl:
"""Series tree node."""

View File

@@ -100,10 +100,10 @@
*
*
* BPS + percent + ratio pattern
* @typedef {Brk.BpsPercentRatioPattern3} PercentRatioPattern
* @typedef {Brk.BpsPercentRatioPattern2} PercentRatioPattern
*
* Percent + ratio per window + cumulative (mirrors CountPattern but for percent)
* @typedef {Brk._1m1w1y24hCumulativePattern} PercentRatioCumulativePattern
* @typedef {Brk._1m1w1y24hBpsPercentRatioPattern} PercentRatioCumulativePattern
*
* BPS + ratio pattern (for NUPL and similar)
* @typedef {Brk.BpsRatioPattern} NuplPattern

View File

@@ -37,7 +37,6 @@ export function createNetworkSection() {
transactions,
inputs,
outputs,
scripts,
supply,
addrs,
cohorts,
@@ -67,21 +66,26 @@ export function createNetworkSection() {
defaultActive: true,
},
{
key: "emptyOutput",
key: "empty",
name: "Empty",
color: st.empty,
defaultActive: false,
},
{
key: "unknownOutput",
key: "unknown",
name: "Unknown",
color: st.unknown,
defaultActive: false,
},
]);
// All script types = addressable + non-addressable
const scriptTypes = [...addressTypes, ...nonAddressableTypes];
// All output types = addressable + non-addressable (12 total)
const outputTypes = [...addressTypes, ...nonAddressableTypes];
// Spendable input types: every output type can fund an input *except* OP_RETURN
const inputTypes = [
...addressTypes,
...nonAddressableTypes.filter((t) => t.key !== "opReturn"),
];
// Transacting types (transaction participation)
const activityTypes = /** @type {const} */ ([
@@ -114,13 +118,13 @@ export function createNetworkSection() {
name: "Funded Reused",
title: "Funded Reused Address Count by Type",
/** @param {AddressableType} t */
getSeries: (t) => addrs.reused.funded[t],
getSeries: (t) => addrs.reused.count.funded[t],
},
{
name: "Total Reused",
title: "Total Reused Address Count by Type",
/** @param {AddressableType} t */
getSeries: (t) => addrs.reused.total[t],
getSeries: (t) => addrs.reused.count.total[t],
},
]);
@@ -173,12 +177,12 @@ export function createNetworkSection() {
title: title("Reused Address Count"),
bottom: [
line({
series: addrs.reused.funded[key],
series: addrs.reused.count.funded[key],
name: "Funded",
unit: Unit.count,
}),
line({
series: addrs.reused.total[key],
series: addrs.reused.count.total[key],
name: "Total",
color: colors.gray,
unit: Unit.count,
@@ -190,7 +194,7 @@ export function createNetworkSection() {
title: title("Funded Reused Addresses"),
bottom: [
line({
series: addrs.reused.funded[key],
series: addrs.reused.count.funded[key],
name: "Funded Reused",
unit: Unit.count,
}),
@@ -201,13 +205,83 @@ export function createNetworkSection() {
title: title("Total Reused Addresses"),
bottom: [
line({
series: addrs.reused.total[key],
series: addrs.reused.count.total[key],
name: "Total Reused",
color: colors.gray,
unit: Unit.count,
}),
],
},
{
name: "Count",
tree: chartsFromCount({
pattern: addrs.reused.uses.reusedAddrUseCount[key],
title,
metric: "Reused Address Uses",
unit: Unit.count,
}),
},
{
name: "Share",
tree: chartsFromPercentCumulative({
pattern: addrs.reused.uses.reusedAddrUsePercent[key],
title,
metric: "Share of Outputs to Reused Addresses",
}),
},
],
},
{
name: "Exposed",
tree: [
{
name: "Compare",
title: title("Exposed Address Count"),
bottom: [
line({
series: addrs.exposed.count.funded[key],
name: "Funded",
unit: Unit.count,
}),
line({
series: addrs.exposed.count.total[key],
name: "Total",
color: colors.gray,
unit: Unit.count,
}),
],
},
{
name: "Funded",
title: title("Funded Exposed Addresses"),
bottom: [
line({
series: addrs.exposed.count.funded[key],
name: "Funded Exposed",
unit: Unit.count,
}),
],
},
{
name: "Total",
title: title("Total Exposed Addresses"),
bottom: [
line({
series: addrs.exposed.count.total[key],
name: "Total Exposed",
color: colors.gray,
unit: Unit.count,
}),
],
},
{
name: "Supply",
title: title("Supply in Exposed Addresses"),
bottom: satsBtcUsd({
pattern: addrs.exposed.supply[key],
name: "Supply",
}),
},
],
},
...simpleDeltaTree({
@@ -257,166 +331,154 @@ export function createNetworkSection() {
];
};
/** @type {Record<string, typeof scriptTypes[number]>} */
const byKey = Object.fromEntries(scriptTypes.map((t) => [t.key, t]));
const scriptGroups = [
{ name: "Legacy", types: [byKey.p2pkh, byKey.p2pk33, byKey.p2pk65] },
{ name: "Script Hash", types: [byKey.p2sh, byKey.p2ms] },
{ name: "SegWit", types: [byKey.p2wsh, byKey.p2wpkh] },
{ name: "Taproot", types: [byKey.p2a, byKey.p2tr] },
{
name: "Other",
types: [byKey.opReturn, byKey.emptyOutput, byKey.unknownOutput],
},
];
/**
* @param {string} direction
* @param {{ byType: Record<AddressableType, CountPattern<number>>, percent: Record<AddressableType, PercentRatioCumulativePattern> }} source
* Build a "By Type" subtree: Compare (count / tx count / tx %) plus a
* per-type drill-down with the same three metrics.
*
* @template {string} K
* @param {Object} args
* @param {string} args.label - Singular noun for count/tree labels ("Output" / "Prev-Out")
* @param {Readonly<Record<K, CountPattern<number>>>} args.count
* @param {Readonly<Record<K, CountPattern<number>>>} args.txCount
* @param {Readonly<Record<K, PercentRatioCumulativePattern>>} args.txPercent
* @param {ReadonlyArray<{key: K, name: string, color: Color, defaultActive: boolean}>} args.types
* @returns {PartialOptionsTree}
*/
const createTxTypeGroup = (direction, source) => {
const lowerDir = direction.toLowerCase();
return {
name: `By ${direction} Type`,
tree: [
{
name: "Count Compare",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.byType[t.key].sum[w.key],
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.byType[t.key].cumulative,
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
},
],
},
{
name: "% Compare",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Share of Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.percent[t.key][w.key].percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Share of Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.percent[t.key].cumulative.percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
},
],
},
...addressTypes.map((t) => ({
name: t.name,
tree: [
{
name: "Count",
tree: chartsFromCount({
pattern: source.byType[t.key],
metric: `Transactions with ${t.name} ${lowerDir}`,
unit: Unit.count,
color: t.color,
}),
},
{
name: "% of All",
tree: chartsFromPercentCumulative({
pattern: source.percent[t.key],
metric: `Share of Transactions with ${t.name} ${lowerDir}`,
color: t.color,
}),
},
],
})),
],
};
};
/**
* @template {keyof typeof scripts.count} K
* @param {string} groupName
* @param {ReadonlyArray<{key: K, name: string, color: Color}>} types
*/
const createScriptGroup = (groupName, types) => ({
name: groupName,
tree: [
const createByTypeTree = ({ label, count, txCount, txPercent, types }) => {
const lowerLabel = label.toLowerCase();
return [
{
name: "Compare",
tree: [
{
name: `${label} Count`,
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} ${groupName} Output Count`,
title: `${w.title} ${label} Count by Type`,
bottom: types.map((t) =>
line({
series: /** @type {CountPattern<number>} */ (
scripts.count[t.key]
).sum[w.key],
series: count[t.key].sum[w.key],
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative ${groupName} Output Count`,
title: `Cumulative ${label} Count by Type`,
bottom: types.map((t) =>
line({
series: scripts.count[t.key].cumulative,
series: count[t.key].cumulative,
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
},
],
},
{
name: "TX Count",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Transactions by ${label} Type`,
bottom: types.map((t) =>
line({
series: txCount[t.key].sum[w.key],
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Transactions by ${label} Type`,
bottom: types.map((t) =>
line({
series: txCount[t.key].cumulative,
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
},
],
},
{
name: "TX %",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Share of Transactions by ${label} Type`,
bottom: types.map((t) =>
line({
series: txPercent[t.key][w.key].percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Share of Transactions by ${label} Type`,
bottom: types.map((t) =>
line({
series: txPercent[t.key].percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
},
],
},
],
},
...types.map((t) => ({
name: t.name,
tree: [
{
name: `${label} Count`,
tree: chartsFromCount({
pattern: /** @type {CountPattern<number>} */ (scripts.count[t.key]),
metric: `${t.name} Output Count`,
pattern: count[t.key],
metric: `${t.name} ${label} Count`,
unit: Unit.count,
color: t.color,
}),
})),
},
{
name: "TX Count",
tree: chartsFromCount({
pattern: txCount[t.key],
metric: `Transactions with ${t.name} ${lowerLabel}`,
unit: Unit.count,
color: t.color,
}),
},
{
name: "TX %",
tree: chartsFromPercentCumulative({
pattern: txPercent[t.key],
metric: `Share of Transactions with ${t.name} ${lowerLabel}`,
color: t.color,
}),
},
],
});
})),
];
};
return {
name: "Network",
@@ -463,7 +525,7 @@ export function createNetworkSection() {
name: "OP_RETURN",
title: "OP_RETURN Burned",
bottom: satsBtcUsd({
pattern: scripts.value.opReturn.cumulative,
pattern: outputs.value.opReturn.cumulative,
name: "All Time",
}),
},
@@ -529,8 +591,6 @@ export function createNetworkSection() {
unit: Unit.count,
}),
},
createTxTypeGroup("Input", transactions.inputTypes),
createTxTypeGroup("Output", transactions.outputTypes),
{
name: "Velocity",
title: "Transaction Velocity",
@@ -645,7 +705,7 @@ export function createNetworkSection() {
title: "UTXO Count",
bottom: [
line({
series: outputs.count.unspent,
series: outputs.unspent.count,
name: "Count",
unit: Unit.count,
}),
@@ -683,20 +743,66 @@ export function createNetworkSection() {
},
{
name: "Inputs",
tree: [
{
name: "Count",
tree: chartsFromAggregatedPerBlock({
pattern: inputs.count,
metric: "Input Count",
unit: Unit.count,
}),
},
{
name: "Per Second",
tree: averagesArray({
windows: inputs.perSec,
metric: "Inputs per Second",
unit: Unit.perSec,
}),
},
{
name: "By Type",
tree: createByTypeTree({
label: "Prev-Out",
count: inputs.byType.inputCount,
txCount: inputs.byType.txCount,
txPercent: inputs.byType.txPercent,
types: inputTypes,
}),
},
],
},
{
name: "Outputs",
tree: [
{
name: "Count",
tree: chartsFromAggregatedPerBlock({
pattern: outputs.count.total,
metric: "Output Count",
unit: Unit.count,
}),
},
{
name: "Per Second",
tree: averagesArray({
windows: outputs.perSec,
metric: "Outputs per Second",
unit: Unit.perSec,
}),
},
{
name: "By Type",
tree: createByTypeTree({
label: "Output",
count: outputs.byType.outputCount,
txCount: outputs.byType.txCount,
txPercent: outputs.byType.txPercent,
types: outputTypes,
}),
},
],
},
{
name: "Throughput",
tree: ROLLING_WINDOWS.map((w) => ({
@@ -710,13 +816,13 @@ export function createNetworkSection() {
unit: Unit.perSec,
}),
line({
series: transactions.volume.inputsPerSec[w.key],
series: inputs.perSec[w.key],
name: "Inputs/sec",
color: colors.entity.input,
unit: Unit.perSec,
}),
line({
series: transactions.volume.outputsPerSec[w.key],
series: outputs.perSec[w.key],
name: "Outputs/sec",
color: colors.entity.output,
unit: Unit.perSec,
@@ -758,46 +864,6 @@ export function createNetworkSection() {
],
},
// Scripts
{
name: "Scripts",
tree: [
{
name: "Compare",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Output Count by Script Type`,
bottom: scriptTypes.map((t) =>
line({
series: /** @type {CountPattern<number>} */ (
scripts.count[t.key]
).sum[w.key],
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: "Cumulative Output Count by Script Type",
bottom: scriptTypes.map((t) =>
line({
series: scripts.count[t.key].cumulative,
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
},
],
},
...scriptGroups.map((g) => createScriptGroup(g.name, g.types)),
],
},
],
};
}

View File

@@ -1126,7 +1126,7 @@ export function chartsFromPercentCumulative({
}),
).concat(
percentRatio({
pattern: pattern.cumulative,
pattern,
name: "All Time",
color: colors.time.all,
}),
@@ -1145,7 +1145,7 @@ export function chartsFromPercentCumulative({
name: "Cumulative",
title: title(`Cumulative ${metric}`),
bottom: percentRatio({
pattern: pattern.cumulative,
pattern,
name: "All Time",
color: color ?? colors.time.all,
}),