global: big snapshot

nym21 · 2026-04-13 22:46:56 +02:00
parent c3cef71aa3 · commit 765261648d
89 changed files with 4138 additions and 149 deletions

.gitignore

@@ -39,6 +39,7 @@ flamegraph.svg
# AI
.claude/settings*
!CLAUDE.md
# Expand
expand.rs

CLAUDE.md

@@ -0,0 +1 @@
Codex will review your output once you are done.

Cargo.lock

@@ -364,8 +364,6 @@ dependencies = [
[[package]]
name = "brk-corepc-client"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c15bc86010adb0c3118d88a531a7d2633b726fe97a6c2888ce200946d198e7b"
dependencies = [
"bitcoin",
"brk-corepc-jsonrpc",
@@ -378,8 +376,6 @@ dependencies = [
[[package]]
name = "brk-corepc-jsonrpc"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbb72c73b4c3aafd6ab3d0a0ad07b2961543929452d7c5d8f11b88e7a1ca7725"
dependencies = [
"base64 0.22.1",
"serde",
@@ -389,8 +385,6 @@ dependencies = [
[[package]]
name = "brk-corepc-types"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca4662e4e22838c09a6f80d1677f6177295eddbb524515be9156a8f8412c4147"
dependencies = [
"bitcoin",
"serde",
@@ -649,6 +643,7 @@ dependencies = [
"parking_lot",
"rayon",
"rlimit",
"rustc-hash",
"tracing",
]

Cargo.toml

@@ -65,8 +65,10 @@ brk_types = { version = "0.3.0-beta.1", path = "crates/brk_types" }
brk_website = { version = "0.3.0-beta.1", path = "crates/brk_website" }
byteview = "0.10.1"
color-eyre = "0.6.5"
corepc-client = { package = "brk-corepc-client", version = "0.11.0", features = ["client-sync"] }
corepc-jsonrpc = { package = "brk-corepc-jsonrpc", version = "0.19.0", features = ["simple_http"], default-features = false }
# corepc-client = { package = "brk-corepc-client", version = "0.11.0", features = ["client-sync"] }
corepc-client = { package = "brk-corepc-client", path = "../corepc/client", features = ["client-sync"] }
# corepc-jsonrpc = { package = "brk-corepc-jsonrpc", version = "0.19.0", features = ["simple_http"], default-features = false }
corepc-jsonrpc = { package = "brk-corepc-jsonrpc", path = "../corepc/jsonrpc", features = ["simple_http"], default-features = false }
derive_more = { version = "2.1.1", features = ["deref", "deref_mut"] }
fjall = "=3.0.4"
indexmap = { version = "2.14.0", features = ["serde"] }


@@ -1255,6 +1255,30 @@ pub struct BpsCentsPercentilesRatioSatsSmaStdUsdPattern {
pub usd: SeriesPattern1<Dollars>,
}
/// Pattern struct for repeated tree structure.
pub struct P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2 {
pub p2a: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pk33: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pk65: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2sh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2tr: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wpkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wsh: AverageBlockCumulativeSumPattern<StoredU64>,
}
/// Pattern struct for repeated tree structure.
pub struct P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 {
pub p2a: _1m1w1y24hCumulativePattern,
pub p2pk33: _1m1w1y24hCumulativePattern,
pub p2pk65: _1m1w1y24hCumulativePattern,
pub p2pkh: _1m1w1y24hCumulativePattern,
pub p2sh: _1m1w1y24hCumulativePattern,
pub p2tr: _1m1w1y24hCumulativePattern,
pub p2wpkh: _1m1w1y24hCumulativePattern,
pub p2wsh: _1m1w1y24hCumulativePattern,
}
/// Pattern struct for repeated tree structure.
pub struct Pct0Pct1Pct2Pct5Pct95Pct98Pct99Pattern {
pub pct0_5: BpsPriceRatioPattern,
@@ -1631,6 +1655,28 @@ impl DeltaHalfInToTotalPattern2 {
}
}
/// Pattern struct for repeated tree structure.
pub struct _1m1w1y24hCumulativePattern {
pub _1m: BpsPercentRatioPattern3,
pub _1w: BpsPercentRatioPattern3,
pub _1y: BpsPercentRatioPattern3,
pub _24h: BpsPercentRatioPattern3,
pub cumulative: BpsPercentRatioPattern3,
}
impl _1m1w1y24hCumulativePattern {
/// Create a new pattern node with accumulated series name.
pub fn new(client: Arc<BrkClientBase>, acc: String) -> Self {
Self {
_1m: BpsPercentRatioPattern3::new(client.clone(), _m(&acc, "sum_1m")),
_1w: BpsPercentRatioPattern3::new(client.clone(), _m(&acc, "sum_1w")),
_1y: BpsPercentRatioPattern3::new(client.clone(), _m(&acc, "sum_1y")),
_24h: BpsPercentRatioPattern3::new(client.clone(), _m(&acc, "sum_24h")),
cumulative: BpsPercentRatioPattern3::new(client.clone(), _m(&acc, "cumulative")),
}
}
}
/// Pattern struct for repeated tree structure.
pub struct _1m1w1y24hBlockPattern {
pub _1m: SeriesPattern1<StoredF32>,
@@ -2876,6 +2922,12 @@ impl BpsRatioPattern {
}
}
/// Pattern struct for repeated tree structure.
pub struct ByPercentPattern {
pub by_type: P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2,
pub percent: P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3,
}
/// Pattern struct for repeated tree structure.
pub struct CentsUsdPattern3 {
pub cents: SeriesPattern1<Cents>,
@@ -3446,6 +3498,8 @@ pub struct SeriesTree_Transactions {
pub fees: SeriesTree_Transactions_Fees,
pub versions: SeriesTree_Transactions_Versions,
pub volume: SeriesTree_Transactions_Volume,
pub input_types: SeriesTree_Transactions_InputTypes,
pub output_types: SeriesTree_Transactions_OutputTypes,
}
impl SeriesTree_Transactions {
@@ -3457,6 +3511,8 @@ impl SeriesTree_Transactions {
fees: SeriesTree_Transactions_Fees::new(client.clone(), format!("{base_path}_fees")),
versions: SeriesTree_Transactions_Versions::new(client.clone(), format!("{base_path}_versions")),
volume: SeriesTree_Transactions_Volume::new(client.clone(), format!("{base_path}_volume")),
input_types: SeriesTree_Transactions_InputTypes::new(client.clone(), format!("{base_path}_input_types")),
output_types: SeriesTree_Transactions_OutputTypes::new(client.clone(), format!("{base_path}_output_types")),
}
}
}
@@ -3540,7 +3596,7 @@ pub struct SeriesTree_Transactions_Fees {
pub input_value: SeriesPattern19<Sats>,
pub output_value: SeriesPattern19<Sats>,
pub fee: _6bBlockTxPattern<Sats>,
pub fee_rate: _6bBlockTxPattern<FeeRate>,
pub fee_rate: SeriesPattern19<FeeRate>,
pub effective_fee_rate: _6bBlockTxPattern<FeeRate>,
}
@@ -3550,7 +3606,7 @@ impl SeriesTree_Transactions_Fees {
input_value: SeriesPattern19::new(client.clone(), "input_value".to_string()),
output_value: SeriesPattern19::new(client.clone(), "output_value".to_string()),
fee: _6bBlockTxPattern::new(client.clone(), "fee".to_string()),
fee_rate: _6bBlockTxPattern::new(client.clone(), "fee_rate".to_string()),
fee_rate: SeriesPattern19::new(client.clone(), "fee_rate".to_string()),
effective_fee_rate: _6bBlockTxPattern::new(client.clone(), "effective_fee_rate".to_string()),
}
}
@@ -3592,6 +3648,144 @@ impl SeriesTree_Transactions_Volume {
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_InputTypes {
pub by_type: SeriesTree_Transactions_InputTypes_ByType,
pub percent: SeriesTree_Transactions_InputTypes_Percent,
}
impl SeriesTree_Transactions_InputTypes {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
by_type: SeriesTree_Transactions_InputTypes_ByType::new(client.clone(), format!("{base_path}_by_type")),
percent: SeriesTree_Transactions_InputTypes_Percent::new(client.clone(), format!("{base_path}_percent")),
}
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_InputTypes_ByType {
pub p2pk65: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pk33: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2sh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wpkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wsh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2tr: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2a: AverageBlockCumulativeSumPattern<StoredU64>,
}
impl SeriesTree_Transactions_InputTypes_ByType {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
p2pk65: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pk65_in".to_string()),
p2pk33: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pk33_in".to_string()),
p2pkh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pkh_in".to_string()),
p2sh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2sh_in".to_string()),
p2wpkh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2wpkh_in".to_string()),
p2wsh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2wsh_in".to_string()),
p2tr: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2tr_in".to_string()),
p2a: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2a_in".to_string()),
}
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_InputTypes_Percent {
pub p2pk65: _1m1w1y24hCumulativePattern,
pub p2pk33: _1m1w1y24hCumulativePattern,
pub p2pkh: _1m1w1y24hCumulativePattern,
pub p2sh: _1m1w1y24hCumulativePattern,
pub p2wpkh: _1m1w1y24hCumulativePattern,
pub p2wsh: _1m1w1y24hCumulativePattern,
pub p2tr: _1m1w1y24hCumulativePattern,
pub p2a: _1m1w1y24hCumulativePattern,
}
impl SeriesTree_Transactions_InputTypes_Percent {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
p2pk65: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pk65_in_rel_to_all".to_string()),
p2pk33: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pk33_in_rel_to_all".to_string()),
p2pkh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pkh_in_rel_to_all".to_string()),
p2sh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2sh_in_rel_to_all".to_string()),
p2wpkh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2wpkh_in_rel_to_all".to_string()),
p2wsh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2wsh_in_rel_to_all".to_string()),
p2tr: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2tr_in_rel_to_all".to_string()),
p2a: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2a_in_rel_to_all".to_string()),
}
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_OutputTypes {
pub by_type: SeriesTree_Transactions_OutputTypes_ByType,
pub percent: SeriesTree_Transactions_OutputTypes_Percent,
}
impl SeriesTree_Transactions_OutputTypes {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
by_type: SeriesTree_Transactions_OutputTypes_ByType::new(client.clone(), format!("{base_path}_by_type")),
percent: SeriesTree_Transactions_OutputTypes_Percent::new(client.clone(), format!("{base_path}_percent")),
}
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_OutputTypes_ByType {
pub p2pk65: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pk33: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2pkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2sh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wpkh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2wsh: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2tr: AverageBlockCumulativeSumPattern<StoredU64>,
pub p2a: AverageBlockCumulativeSumPattern<StoredU64>,
}
impl SeriesTree_Transactions_OutputTypes_ByType {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
p2pk65: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pk65_out".to_string()),
p2pk33: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pk33_out".to_string()),
p2pkh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2pkh_out".to_string()),
p2sh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2sh_out".to_string()),
p2wpkh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2wpkh_out".to_string()),
p2wsh: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2wsh_out".to_string()),
p2tr: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2tr_out".to_string()),
p2a: AverageBlockCumulativeSumPattern::new(client.clone(), "tx_count_with_p2a_out".to_string()),
}
}
}
/// Series tree node.
pub struct SeriesTree_Transactions_OutputTypes_Percent {
pub p2pk65: _1m1w1y24hCumulativePattern,
pub p2pk33: _1m1w1y24hCumulativePattern,
pub p2pkh: _1m1w1y24hCumulativePattern,
pub p2sh: _1m1w1y24hCumulativePattern,
pub p2wpkh: _1m1w1y24hCumulativePattern,
pub p2wsh: _1m1w1y24hCumulativePattern,
pub p2tr: _1m1w1y24hCumulativePattern,
pub p2a: _1m1w1y24hCumulativePattern,
}
impl SeriesTree_Transactions_OutputTypes_Percent {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
p2pk65: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pk65_out_rel_to_all".to_string()),
p2pk33: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pk33_out_rel_to_all".to_string()),
p2pkh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2pkh_out_rel_to_all".to_string()),
p2sh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2sh_out_rel_to_all".to_string()),
p2wpkh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2wpkh_out_rel_to_all".to_string()),
p2wsh: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2wsh_out_rel_to_all".to_string()),
p2tr: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2tr_out_rel_to_all".to_string()),
p2a: _1m1w1y24hCumulativePattern::new(client.clone(), "tx_count_with_p2a_out_rel_to_all".to_string()),
}
}
}
/// Series tree node.
pub struct SeriesTree_Inputs {
pub raw: SeriesTree_Inputs_Raw,
@@ -3721,6 +3915,7 @@ pub struct SeriesTree_Addrs {
pub activity: SeriesTree_Addrs_Activity,
pub total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3,
pub new: SeriesTree_Addrs_New,
pub reused: SeriesTree_Addrs_Reused,
pub delta: SeriesTree_Addrs_Delta,
}
@@ -3735,6 +3930,7 @@ impl SeriesTree_Addrs {
activity: SeriesTree_Addrs_Activity::new(client.clone(), format!("{base_path}_activity")),
total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3::new(client.clone(), "total_addr_count".to_string()),
new: SeriesTree_Addrs_New::new(client.clone(), format!("{base_path}_new")),
reused: SeriesTree_Addrs_Reused::new(client.clone(), format!("{base_path}_reused")),
delta: SeriesTree_Addrs_Delta::new(client.clone(), format!("{base_path}_delta")),
}
}
@@ -3991,6 +4187,21 @@ impl SeriesTree_Addrs_New {
}
}
/// Series tree node.
pub struct SeriesTree_Addrs_Reused {
pub funded: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3,
pub total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3,
}
impl SeriesTree_Addrs_Reused {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
funded: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3::new(client.clone(), "reused_addr_count".to_string()),
total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3::new(client.clone(), "total_reused_addr_count".to_string()),
}
}
}
/// Series tree node.
pub struct SeriesTree_Addrs_Delta {
pub all: AbsoluteRatePattern,


@@ -1,22 +1,18 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPointsSigned32, StoredI64, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use crate::{
indexes,
internal::{LazyRollingDeltasFromHeight, WindowStartVec, Windows},
};
use super::AddrCountsVecs;
use super::{AddrCountsVecs, WithAddrTypes};
type AddrDelta = LazyRollingDeltasFromHeight<StoredU64, StoredI64, BasisPointsSigned32>;
#[derive(Clone, Traversable)]
pub struct DeltaVecs {
pub all: AddrDelta,
#[traversable(flatten)]
pub by_addr_type: ByAddrType<AddrDelta>,
}
#[derive(Clone, Deref, DerefMut, Traversable)]
pub struct DeltaVecs(#[traversable(flatten)] pub WithAddrTypes<AddrDelta>);
impl DeltaVecs {
pub(crate) fn new(
@@ -45,6 +41,6 @@ impl DeltaVecs {
)
});
Self { all, by_addr_type }
Self(WithAddrTypes { all, by_addr_type })
}
}


@@ -0,0 +1,74 @@
//! Exposed address count tracking — running counters of how many addresses
//! are currently in (or have ever been in) the exposed set, per address type
//! plus an aggregated `all`. See the parent [`super`] module for the
//! definition of "exposed" and how it varies by address type.
mod state;
mod vecs;
pub use state::AddrTypeToExposedAddrCount;
pub use vecs::ExposedAddrCountAllVecs;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::indexes;
/// Exposed address counts: funded (currently at-risk) and total (ever at-risk).
#[derive(Traversable)]
pub struct ExposedAddrCountsVecs<M: StorageMode = Rw> {
pub funded: ExposedAddrCountAllVecs<M>,
pub total: ExposedAddrCountAllVecs<M>,
}
impl ExposedAddrCountsVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self {
funded: ExposedAddrCountAllVecs::forced_import(
db,
"exposed_addr_count",
version,
indexes,
)?,
total: ExposedAddrCountAllVecs::forced_import(
db,
"total_exposed_addr_count",
version,
indexes,
)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.funded
.min_stateful_len()
.min(self.total.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.funded
.par_iter_height_mut()
.chain(self.total.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.funded.reset_height()?;
self.total.reset_height()?;
Ok(())
}
pub(crate) fn compute_rest(&mut self, starting_indexes: &Indexes, exit: &Exit) -> Result<()> {
self.funded.compute_rest(starting_indexes, exit)?;
self.total.compute_rest(starting_indexes, exit)?;
Ok(())
}
}


@@ -0,0 +1,42 @@
use brk_cohort::ByAddrType;
use brk_types::{Height, StoredU64};
use derive_more::{Deref, DerefMut};
use vecdb::ReadableVec;
use crate::internal::PerBlock;
use super::vecs::ExposedAddrCountAllVecs;
/// Runtime counter for exposed address counts per address type.
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToExposedAddrCount(ByAddrType<u64>);
impl AddrTypeToExposedAddrCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
}
impl From<(&ExposedAddrCountAllVecs, Height)> for AddrTypeToExposedAddrCount {
#[inline]
fn from((vecs, starting_height): (&ExposedAddrCountAllVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
let read = |v: &PerBlock<StoredU64>| -> u64 {
v.height.collect_one(prev_height).unwrap().into()
};
Self(ByAddrType {
p2pk65: read(&vecs.by_addr_type.p2pk65),
p2pk33: read(&vecs.by_addr_type.p2pk33),
p2pkh: read(&vecs.by_addr_type.p2pkh),
p2sh: read(&vecs.by_addr_type.p2sh),
p2wpkh: read(&vecs.by_addr_type.p2wpkh),
p2wsh: read(&vecs.by_addr_type.p2wsh),
p2tr: read(&vecs.by_addr_type.p2tr),
p2a: read(&vecs.by_addr_type.p2a),
})
} else {
Default::default()
}
}
}
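// A minimal sketch of the resume semantics above, with a hypothetical
// `load_at` closure standing in for the `collect_one` read: seed from the
// last fully-processed block, or start from zero at genesis.
fn sketch_seed(starting_height: u64, load_at: impl Fn(u64) -> u64) -> u64 {
    if starting_height > 0 {
        load_at(starting_height - 1)
    } else {
        0
    }
}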


@@ -0,0 +1,30 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
};
/// Exposed address count (`all` + per-type) for a single variant (funded or total).
#[derive(Deref, DerefMut, Traversable)]
pub struct ExposedAddrCountAllVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<StoredU64, M>>,
);
impl ExposedAddrCountAllVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(WithAddrTypes::<PerBlock<StoredU64>>::forced_import(
db, name, version, indexes,
)?))
}
}


@@ -0,0 +1,95 @@
//! Exposed address tracking (quantum / pubkey-exposure sense).
//!
//! An address is "exposed" once its public key is in the blockchain. Once
//! exposed, any funds at that address are at cryptographic risk (e.g. from
//! a quantum attacker capable of recovering the private key from the pubkey).
//!
//! When the pubkey gets exposed depends on the address type:
//!
//! - **P2PK33, P2PK65, P2TR**: the pubkey (or P2TR's tweaked output key) is
//! directly in the locking script of the funding output. These addresses are
//! exposed the moment they receive any funds.
//! - **P2PKH, P2SH, P2WPKH, P2WSH**: the locking script contains a hash of
//! the pubkey/script. The pubkey is only revealed when spending. Note that
//! even the spending tx itself exposes the pubkey while the address still
//! holds funds — during the mempool window between broadcast and confirmation,
//! the pubkey is visible while the UTXO being spent is still unspent on-chain.
//! So every spent address of these types has had at least one moment with
//! funds at quantum risk.
//! - **P2A**: anyone-can-spend, no pubkey at all. Excluded from both counters.
//!
//! Formally, with `is_funding_exposed` = `output_type.pubkey_exposed_at_funding()`:
//! - `funded` (count): `(utxo_count > 0) AND (is_funding_exposed OR spent_txo_count >= 1)`
//! - `total` (count): `(is_funding_exposed AND ever received) OR spent_txo_count >= 1`
//! - `supply` (sats): sum of balances of addresses currently in the funded set
//!
//! For P2PK/P2TR types this means `total ≡ total_addr_count` and
//! `funded ≡ funded_addr_count` (every address of those types is exposed by
//! virtue of existing). For P2PKH/P2SH/P2WPKH/P2WSH it's the strict subset of
//! addresses that have been spent from. The aggregate `all` exposed counter
//! sums these, giving "Bitcoin addresses currently with funds at quantum risk".
//!
//! All metrics are tracked as running counters and require no extra fields
//! on the address data — they're maintained via delta detection in
//! `process_received` and `process_sent`.
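// A minimal sketch of the two membership rules above, assuming standalone
// `utxo_count` / `spent_txo_count` / `ever_received` values; the crate
// derives these from `addr_data` and `OutputType` via delta detection instead.
fn sketch_is_funded_exposed(utxo_count: u64, spent_txo_count: u32, funding_exposed: bool) -> bool {
    utxo_count > 0 && (funding_exposed || spent_txo_count >= 1)
}
fn sketch_is_total_exposed(ever_received: bool, spent_txo_count: u32, funding_exposed: bool) -> bool {
    (funding_exposed && ever_received) || spent_txo_count >= 1
}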
mod count;
mod supply;
pub use count::{AddrTypeToExposedAddrCount, ExposedAddrCountsVecs};
pub use supply::{AddrTypeToExposedAddrSupply, ExposedAddrSupplyVecs};
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::indexes;
/// Top-level container for all exposed address tracking: counts (funded +
/// total) plus the funded supply.
#[derive(Traversable)]
pub struct ExposedAddrVecs<M: StorageMode = Rw> {
pub count: ExposedAddrCountsVecs<M>,
pub supply: ExposedAddrSupplyVecs<M>,
}
impl ExposedAddrVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self {
count: ExposedAddrCountsVecs::forced_import(db, version, indexes)?,
supply: ExposedAddrSupplyVecs::forced_import(db, version, indexes)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.count
.min_stateful_len()
.min(self.supply.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.count
.par_iter_height_mut()
.chain(self.supply.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.count.reset_height()?;
self.supply.reset_height()?;
Ok(())
}
pub(crate) fn compute_rest(&mut self, starting_indexes: &Indexes, exit: &Exit) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.supply.compute_rest(starting_indexes, exit)?;
Ok(())
}
}


@@ -0,0 +1,10 @@
//! Exposed address supply (sats) tracking — running sum of balances held by
//! addresses currently in the funded exposed set, per address type plus an
//! aggregated `all`. See the parent [`super`] module for the definition of
//! "exposed" and how it varies by address type.
mod state;
mod vecs;
pub use state::AddrTypeToExposedAddrSupply;
pub use vecs::ExposedAddrSupplyVecs;


@@ -0,0 +1,43 @@
use brk_cohort::ByAddrType;
use brk_types::{Height, Sats};
use derive_more::{Deref, DerefMut};
use vecdb::ReadableVec;
use crate::internal::PerBlock;
use super::vecs::ExposedAddrSupplyVecs;
/// Runtime running counter for the total balance (sats) held by funded
/// exposed addresses, per address type.
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToExposedAddrSupply(ByAddrType<u64>);
impl AddrTypeToExposedAddrSupply {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
}
impl From<(&ExposedAddrSupplyVecs, Height)> for AddrTypeToExposedAddrSupply {
#[inline]
fn from((vecs, starting_height): (&ExposedAddrSupplyVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
let read = |v: &PerBlock<Sats>| -> u64 {
u64::from(v.height.collect_one(prev_height).unwrap())
};
Self(ByAddrType {
p2pk65: read(&vecs.by_addr_type.p2pk65),
p2pk33: read(&vecs.by_addr_type.p2pk33),
p2pkh: read(&vecs.by_addr_type.p2pkh),
p2sh: read(&vecs.by_addr_type.p2sh),
p2wpkh: read(&vecs.by_addr_type.p2wpkh),
p2wsh: read(&vecs.by_addr_type.p2wsh),
p2tr: read(&vecs.by_addr_type.p2tr),
p2a: read(&vecs.by_addr_type.p2a),
})
} else {
Default::default()
}
}
}


@@ -0,0 +1,33 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Sats, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
};
/// Exposed address supply (sats) — `all` + per-address-type. Tracks the total
/// balance held by addresses currently in the funded exposed set.
#[derive(Deref, DerefMut, Traversable)]
pub struct ExposedAddrSupplyVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<Sats, M>>,
);
impl ExposedAddrSupplyVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(WithAddrTypes::<PerBlock<Sats>>::forced_import(
db,
"exposed_addr_supply",
version,
indexes,
)?))
}
}


@@ -2,16 +2,26 @@ mod activity;
mod addr_count;
mod data;
mod delta;
mod exposed;
mod indexes;
mod new_addr_count;
mod reused;
mod total_addr_count;
mod type_map;
mod with_addr_types;
pub use activity::{AddrActivityVecs, AddrTypeToActivityCounts};
pub use addr_count::{AddrCountsVecs, AddrTypeToAddrCount};
pub use data::AddrsDataVecs;
pub use delta::DeltaVecs;
pub use exposed::{
AddrTypeToExposedAddrCount, AddrTypeToExposedAddrSupply, ExposedAddrVecs,
};
pub use indexes::AnyAddrIndexesVecs;
pub use new_addr_count::NewAddrCountVecs;
pub use reused::{
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount, ReusedAddrVecs,
};
pub use total_addr_count::TotalAddrCountVecs;
pub use type_map::{AddrTypeToTypeIndexMap, AddrTypeToVec, HeightToAddrTypeToVec};
pub use with_addr_types::WithAddrTypes;


@@ -1,7 +1,7 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{
@@ -9,15 +9,14 @@ use crate::{
internal::{PerBlockCumulativeRolling, WindowStartVec, Windows},
};
use super::TotalAddrCountVecs;
use super::{TotalAddrCountVecs, WithAddrTypes};
/// New address count per block (global + per-type)
#[derive(Traversable)]
pub struct NewAddrCountVecs<M: StorageMode = Rw> {
pub all: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// New address count per block (global + per-type).
#[derive(Deref, DerefMut, Traversable)]
pub struct NewAddrCountVecs<M: StorageMode = Rw>(
#[traversable(flatten)]
pub by_addr_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
}
pub WithAddrTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
);
impl NewAddrCountVecs {
pub(crate) fn forced_import(
@@ -26,25 +25,11 @@ impl NewAddrCountVecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let all = PerBlockCumulativeRolling::forced_import(
db,
"new_addr_count",
version,
indexes,
cached_starts,
)?;
let by_addr_type = ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{name}_new_addr_count"),
version,
indexes,
cached_starts,
)
})?;
Ok(Self { all, by_addr_type })
Ok(Self(WithAddrTypes::<
PerBlockCumulativeRolling<StoredU64, StoredU64>,
>::forced_import(
db, "new_addr_count", version, indexes, cached_starts
)?))
}
pub(crate) fn compute(
@@ -53,11 +38,12 @@ impl NewAddrCountVecs {
total_addr_count: &TotalAddrCountVecs,
exit: &Exit,
) -> Result<()> {
self.all.compute(max_from, exit, |height_vec| {
self.0.all.compute(max_from, exit, |height_vec| {
Ok(height_vec.compute_change(max_from, &total_addr_count.all.height, 1, exit)?)
})?;
for ((_, new), (_, total)) in self
.0
.by_addr_type
.iter_mut()
.zip(total_addr_count.by_addr_type.iter())


@@ -0,0 +1,78 @@
//! Reused address count tracking — running counters of how many addresses
//! are currently in (or have ever been in) the reused set, per address type
//! plus an aggregated `all`. See the parent [`super`] module for the
//! definition of "reused".
//!
//! Two counters are exposed:
//! - `funded`: addresses currently funded AND with `funded_txo_count > 1`
//! - `total`: addresses that have ever satisfied `funded_txo_count > 1` (monotonic)
mod state;
mod vecs;
pub use state::AddrTypeToReusedAddrCount;
pub use vecs::ReusedAddrCountAllVecs;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::indexes;
/// Reused address counts: funded (currently with balance) and total (ever reused).
#[derive(Traversable)]
pub struct ReusedAddrCountsVecs<M: StorageMode = Rw> {
pub funded: ReusedAddrCountAllVecs<M>,
pub total: ReusedAddrCountAllVecs<M>,
}
impl ReusedAddrCountsVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self {
funded: ReusedAddrCountAllVecs::forced_import(
db,
"reused_addr_count",
version,
indexes,
)?,
total: ReusedAddrCountAllVecs::forced_import(
db,
"total_reused_addr_count",
version,
indexes,
)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.funded
.min_stateful_len()
.min(self.total.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.funded
.par_iter_height_mut()
.chain(self.total.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.funded.reset_height()?;
self.total.reset_height()?;
Ok(())
}
pub(crate) fn compute_rest(&mut self, starting_indexes: &Indexes, exit: &Exit) -> Result<()> {
self.funded.compute_rest(starting_indexes, exit)?;
self.total.compute_rest(starting_indexes, exit)?;
Ok(())
}
}


@@ -0,0 +1,42 @@
use brk_cohort::ByAddrType;
use brk_types::{Height, StoredU64};
use derive_more::{Deref, DerefMut};
use vecdb::ReadableVec;
use crate::internal::PerBlock;
use super::vecs::ReusedAddrCountAllVecs;
/// Runtime counter for reused address counts per address type.
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToReusedAddrCount(ByAddrType<u64>);
impl AddrTypeToReusedAddrCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
}
impl From<(&ReusedAddrCountAllVecs, Height)> for AddrTypeToReusedAddrCount {
#[inline]
fn from((vecs, starting_height): (&ReusedAddrCountAllVecs, Height)) -> Self {
if let Some(prev_height) = starting_height.decremented() {
let read = |v: &PerBlock<StoredU64>| -> u64 {
v.height.collect_one(prev_height).unwrap().into()
};
Self(ByAddrType {
p2pk65: read(&vecs.by_addr_type.p2pk65),
p2pk33: read(&vecs.by_addr_type.p2pk33),
p2pkh: read(&vecs.by_addr_type.p2pkh),
p2sh: read(&vecs.by_addr_type.p2sh),
p2wpkh: read(&vecs.by_addr_type.p2wpkh),
p2wsh: read(&vecs.by_addr_type.p2wsh),
p2tr: read(&vecs.by_addr_type.p2tr),
p2a: read(&vecs.by_addr_type.p2a),
})
} else {
Default::default()
}
}
}


@@ -0,0 +1,30 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::PerBlock,
};
/// Reused address count (`all` + per-type) for a single variant (funded or total).
#[derive(Deref, DerefMut, Traversable)]
pub struct ReusedAddrCountAllVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<StoredU64, M>>,
);
impl ReusedAddrCountAllVecs {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
Ok(Self(WithAddrTypes::<PerBlock<StoredU64>>::forced_import(
db, name, version, indexes,
)?))
}
}


@@ -0,0 +1,85 @@
//! Reused address tracking.
//!
//! An address is "reused" once its lifetime `funded_txo_count` exceeds 1,
//! i.e. it has received more than one output. This is the simplest
//! output-multiplicity proxy for address linkability.
//!
//! Two facets are tracked here:
//! - [`count`] — how many distinct addresses are currently reused (funded)
//! and how many have *ever* been reused (total). Per address type plus
//! an aggregated `all`.
//! - [`uses`] — per-block count of outputs going to addresses that were
//! already reused, plus the derived percent over total address-output
//! count (denominator from `scripts::count`).
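// A minimal sketch of the definition above, assuming the lifetime
// `funded_txo_count` and current `utxo_count` are available as plain integers:
fn sketch_is_reused(funded_txo_count: u32) -> bool {
    funded_txo_count > 1
}
fn sketch_in_funded_reused_set(funded_txo_count: u32, utxo_count: u64) -> bool {
    sketch_is_reused(funded_txo_count) && utxo_count > 0
}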
mod count;
mod uses;
pub use count::{AddrTypeToReusedAddrCount, ReusedAddrCountsVecs};
pub use uses::{AddrTypeToReusedAddrUseCount, ReusedAddrUsesVecs};
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Indexes, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
indexes,
internal::{WindowStartVec, Windows},
scripts,
};
/// Top-level container for all reused address tracking: counts (funded +
/// total) plus per-block uses (count + percent).
#[derive(Traversable)]
pub struct ReusedAddrVecs<M: StorageMode = Rw> {
pub count: ReusedAddrCountsVecs<M>,
pub uses: ReusedAddrUsesVecs<M>,
}
impl ReusedAddrVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
count: ReusedAddrCountsVecs::forced_import(db, version, indexes)?,
uses: ReusedAddrUsesVecs::forced_import(db, version, indexes, cached_starts)?,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.count
.min_stateful_len()
.min(self.uses.min_stateful_len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.count
.par_iter_height_mut()
.chain(self.uses.par_iter_height_mut())
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.count.reset_height()?;
self.uses.reset_height()?;
Ok(())
}
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
exit: &Exit,
) -> Result<()> {
self.count.compute_rest(starting_indexes, exit)?;
self.uses
.compute_rest(starting_indexes, scripts_count, exit)?;
Ok(())
}
}


@@ -0,0 +1,8 @@
//! Per-block reused-address-use tracking. See [`vecs::ReusedAddrUsesVecs`]
//! for the full description of the metric.
mod state;
mod vecs;
pub use state::AddrTypeToReusedAddrUseCount;
pub use vecs::ReusedAddrUsesVecs;


@@ -0,0 +1,22 @@
use brk_cohort::ByAddrType;
use derive_more::{Deref, DerefMut};
/// Per-block running counter of reused address uses, per address type.
/// Reset at the start of each block (no disk recovery needed since the
/// per-block flow is reconstructed from `process_received` deterministically).
#[derive(Debug, Default, Deref, DerefMut)]
pub struct AddrTypeToReusedAddrUseCount(ByAddrType<u64>);
impl AddrTypeToReusedAddrUseCount {
#[inline]
pub(crate) fn sum(&self) -> u64 {
self.0.values().sum()
}
#[inline]
pub(crate) fn reset(&mut self) {
for v in self.0.values_mut() {
*v = 0;
}
}
}


@@ -0,0 +1,152 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, Version};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, Exit, Rw, StorageMode};
use crate::{
distribution::addr::WithAddrTypes,
indexes,
internal::{
PerBlockCumulativeRolling, PercentCumulativeRolling, RatioU64Bp16, WindowStartVec, Windows,
},
scripts,
};
use super::state::AddrTypeToReusedAddrUseCount;
/// Per-block reused-address-use metrics. A "use" is a single output going
/// to an address (not deduplicated): an address receiving N outputs in one
/// block contributes N. The count only includes uses going to addresses
/// that were *already* reused at the moment of the use, so the use that
/// makes an address reused is not itself counted.
///
/// The denominator for the percent (total address-output count) lives in
/// `scripts::count` and is reused here rather than duplicated.
#[derive(Traversable)]
pub struct ReusedAddrUsesVecs<M: StorageMode = Rw> {
pub reused_addr_use_count:
WithAddrTypes<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub reused_addr_use_percent: WithAddrTypes<PercentCumulativeRolling<BasisPoints16, M>>,
}
impl ReusedAddrUsesVecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let reused_addr_use_count =
WithAddrTypes::<PerBlockCumulativeRolling<StoredU64, StoredU64>>::forced_import(
db,
"reused_addr_use_count",
version,
indexes,
cached_starts,
)?;
let percent_name = "reused_addr_use_percent";
let reused_addr_use_percent = WithAddrTypes {
all: PercentCumulativeRolling::forced_import(db, percent_name, version, indexes)?,
by_addr_type: ByAddrType::new_with_name(|type_name| {
PercentCumulativeRolling::forced_import(
db,
&format!("{type_name}_{percent_name}"),
version,
indexes,
)
})?,
};
Ok(Self {
reused_addr_use_count,
reused_addr_use_percent,
})
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.reused_addr_use_count.min_stateful_len()
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
self.reused_addr_use_count.par_iter_height_mut()
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.reused_addr_use_count.reset_height()
}
#[inline(always)]
pub(crate) fn push_height(&mut self, reused: &AddrTypeToReusedAddrUseCount) {
self.reused_addr_use_count
.push_height(reused.sum(), reused.values().copied());
}
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
scripts_count: &scripts::CountVecs,
exit: &Exit,
) -> Result<()> {
self.reused_addr_use_count
.compute_rest(starting_indexes.height, exit)?;
compute_one_percent(
&mut self.reused_addr_use_percent.all,
&self.reused_addr_use_count.all,
&scripts_count.addr_output_count,
starting_indexes.height,
exit,
)?;
for otype in OutputType::ADDR_TYPES {
compute_one_percent(
self.reused_addr_use_percent
.by_addr_type
.get_mut_unwrap(otype),
self.reused_addr_use_count.by_addr_type.get_unwrap(otype),
denom_for_type(scripts_count, otype),
starting_indexes.height,
exit,
)?;
}
Ok(())
}
}
#[inline]
fn compute_one_percent(
percent: &mut PercentCumulativeRolling<BasisPoints16>,
reused: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
denom: &PerBlockCumulativeRolling<StoredU64, StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
percent.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&reused.cumulative.height,
&denom.cumulative.height,
reused.sum.as_array().map(|w| &w.height),
denom.sum.as_array().map(|w| &w.height),
exit,
)
}
#[inline]
fn denom_for_type(
scripts_count: &scripts::CountVecs,
otype: OutputType,
) -> &PerBlockCumulativeRolling<StoredU64, StoredU64> {
match otype {
OutputType::P2PK33 => &scripts_count.p2pk33,
OutputType::P2PK65 => &scripts_count.p2pk65,
OutputType::P2PKH => &scripts_count.p2pkh,
OutputType::P2SH => &scripts_count.p2sh,
OutputType::P2WPKH => &scripts_count.p2wpkh,
OutputType::P2WSH => &scripts_count.p2wsh,
OutputType::P2TR => &scripts_count.p2tr,
OutputType::P2A => &scripts_count.p2a,
_ => unreachable!("OutputType::ADDR_TYPES contains only address types"),
}
}
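// A minimal sketch of the percent arithmetic, assuming `RatioU64Bp16` maps a
// numerator/denominator pair to basis points (1 bp = 0.01%); the numerator
// never exceeds the denominator here, so the result fits in 0..=10_000.
fn sketch_to_basis_points(reused_uses: u64, addr_outputs: u64) -> u16 {
    if addr_outputs == 0 {
        return 0;
    }
    ((reused_uses as u128 * 10_000) / addr_outputs as u128) as u16
}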


@@ -1,20 +1,18 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, StoredU64, Version};
use derive_more::{Deref, DerefMut};
use vecdb::{Database, Exit, Rw, StorageMode};
use crate::{indexes, internal::PerBlock};
use super::AddrCountsVecs;
use super::{AddrCountsVecs, WithAddrTypes};
/// Total address count (global + per-type) with all derived indexes
#[derive(Traversable)]
pub struct TotalAddrCountVecs<M: StorageMode = Rw> {
pub all: PerBlock<StoredU64, M>,
#[traversable(flatten)]
pub by_addr_type: ByAddrType<PerBlock<StoredU64, M>>,
}
/// Total address count (global + per-type) with all derived indexes.
#[derive(Deref, DerefMut, Traversable)]
pub struct TotalAddrCountVecs<M: StorageMode = Rw>(
#[traversable(flatten)] pub WithAddrTypes<PerBlock<StoredU64, M>>,
);
impl TotalAddrCountVecs {
pub(crate) fn forced_import(
@@ -22,13 +20,12 @@ impl TotalAddrCountVecs {
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let all = PerBlock::forced_import(db, "total_addr_count", version, indexes)?;
let by_addr_type: ByAddrType<PerBlock<StoredU64>> = ByAddrType::new_with_name(|name| {
PerBlock::forced_import(db, &format!("{name}_total_addr_count"), version, indexes)
})?;
Ok(Self { all, by_addr_type })
Ok(Self(WithAddrTypes::<PerBlock<StoredU64>>::forced_import(
db,
"total_addr_count",
version,
indexes,
)?))
}
/// Eagerly compute total = addr_count + empty_addr_count.
@@ -39,14 +36,14 @@ impl TotalAddrCountVecs {
empty_addr_count: &AddrCountsVecs,
exit: &Exit,
) -> Result<()> {
self.all.height.compute_add(
self.0.all.height.compute_add(
max_from,
&addr_count.all.height,
&empty_addr_count.all.height,
exit,
)?;
for ((_, total), ((_, addr), (_, empty))) in self.by_addr_type.iter_mut().zip(
for ((_, total), ((_, addr), (_, empty))) in self.0.by_addr_type.iter_mut().zip(
addr_count
.by_addr_type
.iter()


@@ -0,0 +1,173 @@
//! Generic `all` + per-`AddrType` container, mirrors the `WithSth` pattern
//! along the address-type axis. Used by every metric that tracks one
//! aggregate value alongside a per-address-type breakdown.
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Indexes, Version};
use rayon::prelude::*;
use schemars::JsonSchema;
use vecdb::{AnyStoredVec, AnyVec, Database, EagerVec, Exit, PcoVec, WritableVec};
use crate::{
indexes,
internal::{NumericValue, PerBlock, PerBlockCumulativeRolling, WindowStartVec, Windows},
};
/// `all` aggregate plus per-`AddrType` breakdown.
#[derive(Clone, Traversable)]
pub struct WithAddrTypes<T> {
pub all: T,
#[traversable(flatten)]
pub by_addr_type: ByAddrType<T>,
}
impl<T> WithAddrTypes<PerBlock<T>>
where
T: NumericValue + JsonSchema,
{
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let all = PerBlock::forced_import(db, name, version, indexes)?;
let by_addr_type = ByAddrType::new_with_name(|type_name| {
PerBlock::forced_import(db, &format!("{type_name}_{name}"), version, indexes)
})?;
Ok(Self { all, by_addr_type })
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.by_addr_type
.values()
.map(|v| v.height.len())
.min()
.unwrap()
.min(self.all.height.len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
rayon::iter::once(&mut self.all.height as &mut dyn AnyStoredVec).chain(
self.by_addr_type
.par_values_mut()
.map(|v| &mut v.height as &mut dyn AnyStoredVec),
)
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.all.height.reset()?;
for v in self.by_addr_type.values_mut() {
v.height.reset()?;
}
Ok(())
}
#[inline(always)]
pub(crate) fn push_height<U>(&mut self, total: U, per_type: impl IntoIterator<Item = U>)
where
U: Into<T>,
{
self.all.height.push(total.into());
for (v, value) in self.by_addr_type.values_mut().zip(per_type) {
v.height.push(value.into());
}
}
/// Compute `all.height` as the per-block sum of the per-type vecs.
pub(crate) fn compute_rest(
&mut self,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let sources: Vec<&EagerVec<PcoVec<Height, T>>> =
self.by_addr_type.values().map(|v| &v.height).collect();
self.all
.height
.compute_sum_of_others(starting_indexes.height, &sources, exit)?;
Ok(())
}
}
impl<T, C> WithAddrTypes<PerBlockCumulativeRolling<T, C>>
where
T: NumericValue + JsonSchema + Into<C>,
C: NumericValue + JsonSchema,
{
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let all = PerBlockCumulativeRolling::forced_import(
db,
name,
version,
indexes,
cached_starts,
)?;
let by_addr_type = ByAddrType::new_with_name(|type_name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{type_name}_{name}"),
version,
indexes,
cached_starts,
)
})?;
Ok(Self { all, by_addr_type })
}
pub(crate) fn min_stateful_len(&self) -> usize {
self.by_addr_type
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.all.block.len())
}
pub(crate) fn par_iter_height_mut(
&mut self,
) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
rayon::iter::once(&mut self.all.block as &mut dyn AnyStoredVec).chain(
self.by_addr_type
.par_values_mut()
.map(|v| &mut v.block as &mut dyn AnyStoredVec),
)
}
pub(crate) fn reset_height(&mut self) -> Result<()> {
self.all.block.reset()?;
for v in self.by_addr_type.values_mut() {
v.block.reset()?;
}
Ok(())
}
#[inline(always)]
pub(crate) fn push_height<U>(&mut self, total: U, per_type: impl IntoIterator<Item = U>)
where
U: Into<T>,
{
self.all.block.push(total.into());
for (v, value) in self.by_addr_type.values_mut().zip(per_type) {
v.block.push(value.into());
}
}
/// Finalize `cumulative` / `sum` / `average` for `all` and every per-type vec.
pub(crate) fn compute_rest(&mut self, max_from: Height, exit: &Exit) -> Result<()> {
self.all.compute_rest(max_from, exit)?;
for v in self.by_addr_type.values_mut() {
v.compute_rest(max_from, exit)?;
}
Ok(())
}
}
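// A minimal sketch of the invariant this container maintains, assuming plain
// slices in place of the stored vecs: at every height, `all` equals the sum
// of the eight per-type values, whether written live via `push_height` or
// derived afterwards via `compute_sum_of_others`.
fn sketch_check_invariant(all: &[u64], per_type: &[Vec<u64>; 8]) -> bool {
    all.iter()
        .enumerate()
        .all(|(h, &a)| a == per_type.iter().map(|v| v[h]).sum::<u64>())
}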


@@ -3,7 +3,10 @@ use brk_types::{Cents, Sats, TypeIndex};
use rustc_hash::FxHashMap;
use crate::distribution::{
addr::{AddrTypeToActivityCounts, AddrTypeToVec},
addr::{
AddrTypeToActivityCounts, AddrTypeToExposedAddrCount, AddrTypeToExposedAddrSupply,
AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount, AddrTypeToVec,
},
cohorts::AddrCohorts,
};
@@ -25,6 +28,12 @@ pub(crate) fn process_received(
addr_count: &mut ByAddrType<u64>,
empty_addr_count: &mut ByAddrType<u64>,
activity_counts: &mut AddrTypeToActivityCounts,
reused_addr_count: &mut AddrTypeToReusedAddrCount,
total_reused_addr_count: &mut AddrTypeToReusedAddrCount,
reused_addr_use_count: &mut AddrTypeToReusedAddrUseCount,
exposed_addr_count: &mut AddrTypeToExposedAddrCount,
total_exposed_addr_count: &mut AddrTypeToExposedAddrCount,
exposed_addr_supply: &mut AddrTypeToExposedAddrSupply,
) {
let max_type_len = received_data
.iter()
@@ -43,6 +52,12 @@ pub(crate) fn process_received(
let type_addr_count = addr_count.get_mut(output_type).unwrap();
let type_empty_count = empty_addr_count.get_mut(output_type).unwrap();
let type_activity = activity_counts.get_mut_unwrap(output_type);
let type_reused_count = reused_addr_count.get_mut(output_type).unwrap();
let type_total_reused_count = total_reused_addr_count.get_mut(output_type).unwrap();
let type_reused_use_count = reused_addr_use_count.get_mut(output_type).unwrap();
let type_exposed_count = exposed_addr_count.get_mut(output_type).unwrap();
let type_total_exposed_count = total_exposed_addr_count.get_mut(output_type).unwrap();
let type_exposed_supply = exposed_addr_supply.get_mut(output_type).unwrap();
// Aggregate receives by address - each address processed exactly once
for (type_index, value) in vec {
@@ -57,6 +72,13 @@ pub(crate) fn process_received(
// Track receiving activity - each address in receive aggregation
type_activity.receiving += 1;
// Capture state BEFORE the receive mutates funded_txo_count
let was_funded = addr_data.is_funded();
let was_reused = addr_data.is_reused();
let funded_txo_count_before = addr_data.funded_txo_count;
let was_pubkey_exposed = addr_data.is_pubkey_exposed(output_type);
let exposed_contribution_before = addr_data.exposed_supply_contribution(output_type);
match status {
TrackingStatus::New => {
*type_addr_count += 1;
@@ -134,6 +156,54 @@ pub(crate) fn process_received(
.receive_outputs(addr_data, recv.total_value, price, recv.output_count);
}
}
// Update reused counts based on the post-receive state
let is_now_reused = addr_data.is_reused();
if is_now_reused && !was_reused {
// Newly crossed the reuse threshold this block
*type_reused_count += 1;
*type_total_reused_count += 1;
} else if is_now_reused && !was_funded {
// Already-reused address reactivating into the funded set
*type_reused_count += 1;
}
// Per-block reused-use count: every individual output to this
// address counts iff the address was already reused at the
// moment of that output. With aggregation, that means we
// skip enough outputs at the front to take the lifetime
// funding count from `funded_txo_count_before` past 1, then
// count the rest. `skipped` is `max(0, 2 - before)`.
let skipped = 2u32.saturating_sub(funded_txo_count_before);
let counted = recv.output_count.saturating_sub(skipped);
*type_reused_use_count += u64::from(counted);
// Update exposed counts. The address's pubkey-exposure state
// is unchanged by a receive (spent_txo_count unchanged), so we
// can use the captured `was_pubkey_exposed` for both pre and post.
// After the receive the address is always funded, so it's in the
// funded exposed set iff its pubkey is exposed.
//
// Funded exposed enters when the address wasn't funded before but
// is now AND its pubkey is exposed.
// Total exposed (pk_exposed_at_funding types only) increments on
// first-ever receive (status == TrackingStatus::New); for other
// types it's incremented in process_sent on the first spend.
if !was_funded && was_pubkey_exposed {
*type_exposed_count += 1;
}
if output_type.pubkey_exposed_at_funding()
&& matches!(status, TrackingStatus::New)
{
*type_total_exposed_count += 1;
}
// Update exposed supply via post-receive contribution delta.
let exposed_contribution_after =
addr_data.exposed_supply_contribution(output_type);
// Receives can only add to balance and membership, so the delta
// is always non-negative.
*type_exposed_supply += exposed_contribution_after - exposed_contribution_before;
}
}
}
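// A worked example of the `skipped`/`counted` arithmetic above: a brand-new
// address receiving 3 outputs in one block skips 2 (the first output funds
// it, the second crosses the reuse threshold) and counts 1, while an
// already-reused address counts every output.
#[cfg(test)]
mod reused_use_sketch {
    fn reused_uses(funded_txo_count_before: u32, output_count: u32) -> u64 {
        let skipped = 2u32.saturating_sub(funded_txo_count_before);
        u64::from(output_count.saturating_sub(skipped))
    }
    #[test]
    fn examples() {
        assert_eq!(reused_uses(0, 3), 1);
        assert_eq!(reused_uses(1, 3), 2);
        assert_eq!(reused_uses(5, 3), 3);
    }
}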


@@ -5,7 +5,10 @@ use rustc_hash::FxHashSet;
use vecdb::VecIndex;
use crate::distribution::{
addr::{AddrTypeToActivityCounts, HeightToAddrTypeToVec},
addr::{
AddrTypeToActivityCounts, AddrTypeToExposedAddrCount, AddrTypeToExposedAddrSupply,
AddrTypeToReusedAddrCount, HeightToAddrTypeToVec,
},
cohorts::AddrCohorts,
compute::PriceRangeMax,
};
@@ -35,6 +38,10 @@ pub(crate) fn process_sent(
addr_count: &mut ByAddrType<u64>,
empty_addr_count: &mut ByAddrType<u64>,
activity_counts: &mut AddrTypeToActivityCounts,
reused_addr_count: &mut AddrTypeToReusedAddrCount,
exposed_addr_count: &mut AddrTypeToExposedAddrCount,
total_exposed_addr_count: &mut AddrTypeToExposedAddrCount,
exposed_addr_supply: &mut AddrTypeToExposedAddrSupply,
received_addrs: &ByAddrType<FxHashSet<TypeIndex>>,
height_to_price: &[Cents],
height_to_timestamp: &[Timestamp],
@@ -57,6 +64,10 @@ pub(crate) fn process_sent(
let type_addr_count = addr_count.get_mut(output_type).unwrap();
let type_empty_count = empty_addr_count.get_mut(output_type).unwrap();
let type_activity = activity_counts.get_mut_unwrap(output_type);
let type_reused_count = reused_addr_count.get_mut(output_type).unwrap();
let type_exposed_count = exposed_addr_count.get_mut(output_type).unwrap();
let type_total_exposed_count = total_exposed_addr_count.get_mut(output_type).unwrap();
let type_exposed_supply = exposed_addr_supply.get_mut(output_type).unwrap();
let type_received = received_addrs.get(output_type);
let type_seen = seen_senders.get_mut_unwrap(output_type);
@@ -78,6 +89,11 @@ pub(crate) fn process_sent(
let will_be_empty = addr_data.has_1_utxos();
// Capture exposed state BEFORE the spend mutates spent_txo_count.
let was_pubkey_exposed = addr_data.is_pubkey_exposed(output_type);
let exposed_contribution_before =
addr_data.exposed_supply_contribution(output_type);
// Compute buckets once
let prev_bucket = AmountBucket::from(prev_balance);
let new_bucket = AmountBucket::from(new_balance);
@@ -91,6 +107,27 @@ pub(crate) fn process_sent(
.unwrap();
cohort_state.send(addr_data, value, current_price, prev_price, peak_price, age)?;
// addr_data.spent_txo_count is now incremented by 1.
// Update exposed supply via post-spend contribution delta.
let exposed_contribution_after =
addr_data.exposed_supply_contribution(output_type);
if exposed_contribution_after >= exposed_contribution_before {
*type_exposed_supply += exposed_contribution_after - exposed_contribution_before;
} else {
*type_exposed_supply -= exposed_contribution_before - exposed_contribution_after;
}
// Update exposed counts on first-ever pubkey exposure.
// For non-pk-exposed types this fires on the first spend; for
// pk-exposed types it never fires here (was_pubkey_exposed was
// already true at first receive in process_received).
if !was_pubkey_exposed {
*type_total_exposed_count += 1;
if !will_be_empty {
*type_exposed_count += 1;
}
}
// If crossing a bucket boundary, remove the (now-updated) address from old bucket
if will_be_empty || crossing_boundary {
@@ -101,6 +138,17 @@ pub(crate) fn process_sent(
if will_be_empty {
*type_addr_count -= 1;
*type_empty_count += 1;
// Reused addr leaving the funded reused set
if addr_data.is_reused() {
*type_reused_count -= 1;
}
// Exposed addr leaving the funded exposed set: it was in the
// set iff its pubkey was exposed pre-spend (reaching
// process_sent at all implies it was funded), and it leaves
// now because it's empty.
if was_pubkey_exposed {
*type_exposed_count -= 1;
}
lookup.move_to_empty(output_type, type_index);
} else if crossing_boundary {
cohorts
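// A minimal sketch of the exposed-supply delta handling above: the counter is
// unsigned, so a spend's possibly negative contribution delta is applied as
// two branches instead of a signed add, avoiding u64 underflow.
fn sketch_apply_delta(counter: &mut u64, before: u64, after: u64) {
    if after >= before {
        *counter += after - before;
    } else {
        *counter -= before - after;
    }
}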


@@ -11,7 +11,10 @@ use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unli
use crate::{
distribution::{
addr::{AddrTypeToActivityCounts, AddrTypeToAddrCount},
addr::{
AddrTypeToActivityCounts, AddrTypeToAddrCount, AddrTypeToExposedAddrCount,
AddrTypeToExposedAddrSupply, AddrTypeToReusedAddrCount, AddrTypeToReusedAddrUseCount,
},
block::{
AddrCache, InputsResult, process_inputs, process_outputs, process_received,
process_sent,
@@ -192,22 +195,41 @@ pub(crate) fn process_blocks(
// Track running totals - recover from previous height if resuming
debug!("recovering addr_counts from height {}", starting_height);
let (mut addr_counts, mut empty_addr_counts) = if starting_height > Height::ZERO {
let addr_counts =
AddrTypeToAddrCount::from((&vecs.addrs.funded.by_addr_type, starting_height));
let empty_addr_counts =
AddrTypeToAddrCount::from((&vecs.addrs.empty.by_addr_type, starting_height));
(addr_counts, empty_addr_counts)
let (
mut addr_counts,
mut empty_addr_counts,
mut reused_addr_counts,
mut total_reused_addr_counts,
mut exposed_addr_counts,
mut total_exposed_addr_counts,
mut exposed_addr_supply,
) = if starting_height > Height::ZERO {
(
AddrTypeToAddrCount::from((&vecs.addrs.funded.by_addr_type, starting_height)),
AddrTypeToAddrCount::from((&vecs.addrs.empty.by_addr_type, starting_height)),
AddrTypeToReusedAddrCount::from((&vecs.addrs.reused.count.funded, starting_height)),
AddrTypeToReusedAddrCount::from((&vecs.addrs.reused.count.total, starting_height)),
AddrTypeToExposedAddrCount::from((&vecs.addrs.exposed.count.funded, starting_height)),
AddrTypeToExposedAddrCount::from((&vecs.addrs.exposed.count.total, starting_height)),
AddrTypeToExposedAddrSupply::from((&vecs.addrs.exposed.supply, starting_height)),
)
} else {
(
AddrTypeToAddrCount::default(),
AddrTypeToAddrCount::default(),
AddrTypeToReusedAddrCount::default(),
AddrTypeToReusedAddrCount::default(),
AddrTypeToExposedAddrCount::default(),
AddrTypeToExposedAddrCount::default(),
AddrTypeToExposedAddrSupply::default(),
)
};
debug!("addr_counts recovered");
// Track activity counts - reset each block
let mut activity_counts = AddrTypeToActivityCounts::default();
// Reused-use count - per-block flow, reset each block
let mut reused_addr_use_counts = AddrTypeToReusedAddrUseCount::default();
debug!("creating AddrCache");
let mut cache = AddrCache::new();
@@ -226,6 +248,8 @@ pub(crate) fn process_blocks(
.chain(vecs.addrs.funded.par_iter_height_mut())
.chain(vecs.addrs.empty.par_iter_height_mut())
.chain(vecs.addrs.activity.par_iter_height_mut())
.chain(vecs.addrs.reused.par_iter_height_mut())
.chain(vecs.addrs.exposed.par_iter_height_mut())
.chain(rayon::iter::once(
&mut vecs.coinblocks_destroyed.block as &mut dyn AnyStoredVec,
))
@@ -278,6 +302,7 @@ pub(crate) fn process_blocks(
// Reset per-block activity counts
activity_counts.reset();
reused_addr_use_counts.reset();
// Process outputs, inputs, and tick-tock in parallel via rayon::join.
// Collection (build tx_index mappings + bulk mmap reads) is merged into the
@@ -447,6 +472,12 @@ pub(crate) fn process_blocks(
&mut addr_counts,
&mut empty_addr_counts,
&mut activity_counts,
&mut reused_addr_counts,
&mut total_reused_addr_counts,
&mut reused_addr_use_counts,
&mut exposed_addr_counts,
&mut total_exposed_addr_counts,
&mut exposed_addr_supply,
);
// Process sent inputs (addresses sending funds)
@@ -459,6 +490,10 @@ pub(crate) fn process_blocks(
&mut addr_counts,
&mut empty_addr_counts,
&mut activity_counts,
&mut reused_addr_counts,
&mut exposed_addr_counts,
&mut total_exposed_addr_counts,
&mut exposed_addr_supply,
&received_addrs,
height_to_price_vec,
height_to_timestamp_vec,
@@ -481,6 +516,27 @@ pub(crate) fn process_blocks(
.empty
.push_height(empty_addr_counts.sum(), &empty_addr_counts);
vecs.addrs.activity.push_height(&activity_counts);
vecs.addrs.reused.count.funded.push_height(
reused_addr_counts.sum(),
reused_addr_counts.values().copied(),
);
vecs.addrs.reused.count.total.push_height(
total_reused_addr_counts.sum(),
total_reused_addr_counts.values().copied(),
);
vecs.addrs.reused.uses.push_height(&reused_addr_use_counts);
vecs.addrs.exposed.count.funded.push_height(
exposed_addr_counts.sum(),
exposed_addr_counts.values().copied(),
);
vecs.addrs.exposed.count.total.push_height(
total_exposed_addr_counts.sum(),
total_exposed_addr_counts.values().copied(),
);
vecs.addrs.exposed.supply.push_height(
exposed_addr_supply.sum(),
exposed_addr_supply.values().copied(),
);
let is_last_of_day = is_last_of_day[offset];
let date_opt = is_last_of_day.then(|| Date::from(timestamp));

View File

@@ -79,6 +79,8 @@ pub(crate) fn write(
.chain(vecs.addrs.funded.par_iter_height_mut())
.chain(vecs.addrs.empty.par_iter_height_mut())
.chain(vecs.addrs.activity.par_iter_height_mut())
.chain(vecs.addrs.reused.par_iter_height_mut())
.chain(vecs.addrs.exposed.par_iter_height_mut())
.chain(
[
&mut vecs.supply_state as &mut dyn AnyStoredVec,

View File

@@ -27,12 +27,15 @@ use crate::{
PerBlockCumulativeRolling, WindowStartVec, Windows,
db_utils::{finalize_db, open_db},
},
outputs, prices, transactions,
outputs, prices, scripts, transactions,
};
use super::{
AddrCohorts, AddrsDataVecs, AnyAddrIndexesVecs, RangeMap, UTXOCohorts,
addr::{AddrActivityVecs, AddrCountsVecs, DeltaVecs, NewAddrCountVecs, TotalAddrCountVecs},
addr::{
AddrActivityVecs, AddrCountsVecs, DeltaVecs, ExposedAddrVecs, NewAddrCountVecs,
ReusedAddrVecs, TotalAddrCountVecs,
},
};
const VERSION: Version = Version::new(22);
@@ -44,6 +47,8 @@ pub struct AddrMetricsVecs<M: StorageMode = Rw> {
pub activity: AddrActivityVecs<M>,
pub total: TotalAddrCountVecs<M>,
pub new: NewAddrCountVecs<M>,
pub reused: ReusedAddrVecs<M>,
pub exposed: ExposedAddrVecs<M>,
pub delta: DeltaVecs,
#[traversable(wrap = "indexes", rename = "funded")]
pub funded_index:
@@ -154,6 +159,13 @@ impl Vecs {
// Per-block delta of total (global + per-type)
let new_addr_count = NewAddrCountVecs::forced_import(&db, version, indexes, cached_starts)?;
// Reused address tracking (counts + per-block uses + percent)
let reused_addr_count =
ReusedAddrVecs::forced_import(&db, version, indexes, cached_starts)?;
// Exposed address tracking (counts + supply) - quantum / pubkey-exposure sense
let exposed_addr_vecs = ExposedAddrVecs::forced_import(&db, version, indexes)?;
// Growth rate: delta change + rate (global + per-type)
let delta = DeltaVecs::new(version, &addr_count, cached_starts, indexes);
@@ -169,6 +181,8 @@ impl Vecs {
activity: addr_activity,
total: total_addr_count,
new: new_addr_count,
reused: reused_addr_count,
exposed: exposed_addr_vecs,
delta,
funded_index: funded_addr_index,
empty_index: empty_addr_index,
@@ -221,6 +235,7 @@ impl Vecs {
indexes: &indexes::Vecs,
inputs: &inputs::Vecs,
outputs: &outputs::Vecs,
scripts: &scripts::Vecs,
transactions: &transactions::Vecs,
blocks: &blocks::Vecs,
prices: &prices::Vecs,
@@ -285,6 +300,8 @@ impl Vecs {
self.addrs.funded.reset_height()?;
self.addrs.empty.reset_height()?;
self.addrs.activity.reset_height()?;
self.addrs.reused.reset_height()?;
self.addrs.exposed.reset_height()?;
reset_state(
&mut self.any_addr_indexes,
&mut self.addrs_data,
@@ -454,6 +471,10 @@ impl Vecs {
// 6b. Compute address count sum (by addr_type -> all)
self.addrs.funded.compute_rest(starting_indexes, exit)?;
self.addrs.empty.compute_rest(starting_indexes, exit)?;
self.addrs
.reused
.compute_rest(starting_indexes, &scripts.count, exit)?;
self.addrs.exposed.compute_rest(starting_indexes, exit)?;
// 6c. Compute total_addr_count = addr_count + empty_addr_count
self.addrs.total.compute(
@@ -524,6 +545,8 @@ impl Vecs {
.min(Height::from(self.addrs.funded.min_stateful_len()))
.min(Height::from(self.addrs.empty.min_stateful_len()))
.min(Height::from(self.addrs.activity.min_stateful_len()))
.min(Height::from(self.addrs.reused.min_stateful_len()))
.min(Height::from(self.addrs.exposed.min_stateful_len()))
.min(Height::from(self.coinblocks_destroyed.block.len()))
}
}

View File

@@ -0,0 +1,104 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
impl Vecs {
/// Phase 1: walk inputs and populate `input_count` + `tx_count`.
/// Independent of `transactions`, so it can run alongside other inputs work.
pub(crate) fn compute_counts(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let dep_version = indexer.vecs.inputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txin_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.input_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self
.input_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.input_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txin_len = indexer.vecs.inputs.output_type.len();
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
compute_by_addr_type_block_counts(
&mut self.input_count,
&mut self.tx_count,
&fi_batch,
txid_len,
true, // skip coinbase (1 fake input)
starting_indexes.height,
exit,
|tx_pos, per_tx| {
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txin_len
};
itype_cursor.advance(fi_in - itype_cursor.position());
for _ in fi_in..next_fi_in {
let otype = itype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
)
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute` (depends on tx count totals).
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
exit,
)
}
}
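// Illustration only, not part of the commit: the `first_*_index` walk in
// `compute_counts` is a CSR-style layout. Per-tx input ranges fall out of
// consecutive "first index" entries, with the global input count as the
// closing sentinel. A minimal sketch with plain slices, same conventions
// assumed:
fn per_tx_input_ranges(first_txin_index: &[usize], total_txin_len: usize) -> Vec<std::ops::Range<usize>> {
    (0..first_txin_index.len())
        .map(|tx_pos| {
            let start = first_txin_index[tx_pos];
            // The next tx's first input (or the end of the inputs vec) closes this range.
            let end = first_txin_index
                .get(tx_pos + 1)
                .copied()
                .unwrap_or(total_txin_len);
            start..end
        })
        .collect()
}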

View File

@@ -0,0 +1,48 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
input_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{name}_input_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in_rel_to_all"),
version,
indexes,
)
})?,
})
}
}

View File

@@ -0,0 +1,5 @@
mod compute;
mod import;
mod vecs;
pub use vecs::Vecs;

View File

@@ -0,0 +1,18 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total input count (granular). The "type" is the
/// type of the spent output that the input consumes.
pub input_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one input that
/// spends an output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,125 @@
//! Shared per-block per-address-type counters.
//!
//! Used by `outputs/by_type/` (counts outputs per type) and `inputs/by_type/`
//! (counts inputs per type). Walks each block's tx range, calls a scanner
//! callback that fills a `[u32; 12]` per-tx counter, and produces two
//! per-block aggregates in a single pass:
//!
//! - `entry_count` — total number of items (outputs / inputs) per type
//! - `tx_count` — number of txs that contain at least one item of each type
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, Indexes, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
/// Per-block scan that simultaneously computes:
/// - `entry_count[type] += per_tx[type]` (sum of items)
/// - `tx_count[type] += 1 if per_tx[type] > 0` (presence flag)
///
/// `scan_tx` is called once per tx with a zeroed `[u32; 12]` buffer that
/// it must fill with the per-type item count for that tx.
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_by_addr_type_block_counts(
entry_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_count: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
fi_batch: &[TxIndex],
txid_len: usize,
skip_first_tx: bool,
starting_height: Height,
exit: &Exit,
mut scan_tx: impl FnMut(usize, &mut [u32; 12]) -> Result<()>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = if skip_first_tx { fi + 1 } else { fi };
let mut entries_per_block = [0u64; 12];
let mut txs_per_block = [0u64; 12];
for tx_pos in start_tx..next_fi {
let mut per_tx = [0u32; 12];
scan_tx(tx_pos, &mut per_tx)?;
for (i, &n) in per_tx.iter().enumerate() {
if n > 0 {
entries_per_block[i] += u64::from(n);
txs_per_block[i] += 1;
}
}
}
for otype in OutputType::ADDR_TYPES {
let idx = otype as usize;
entry_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(entries_per_block[idx]));
tx_count
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(txs_per_block[idx]));
}
if entry_count.p2pkh.block.batch_limit_reached() {
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
}
{
let _lock = exit.lock();
for (_, v) in entry_count.iter_mut() {
v.block.write()?;
}
for (_, v) in tx_count.iter_mut() {
v.block.write()?;
}
}
for (_, v) in entry_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
for (_, v) in tx_count.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
Ok(())
}
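// Illustration only, not part of the commit: the two aggregates above,
// reduced to a toy block of per-tx counters. One pass produces both the
// item sum (`entry_count`) and the presence count (`tx_count`).
fn aggregate_block(per_tx_counters: &[[u32; 12]]) -> ([u64; 12], [u64; 12]) {
    let mut entries = [0u64; 12];
    let mut txs = [0u64; 12];
    for per_tx in per_tx_counters {
        for (i, &n) in per_tx.iter().enumerate() {
            if n > 0 {
                entries[i] += u64::from(n); // total items of type i in the block
                txs[i] += 1; // this tx holds at least one item of type i
            }
        }
    }
    (entries, txs)
}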
/// Compute per-type tx-count percent over total tx count, for all 8 address types.
pub(crate) fn compute_by_addr_type_tx_percents(
tx_count: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
tx_percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
for otype in OutputType::ADDR_TYPES {
let source = tx_count.get_unwrap(otype);
tx_percent
.get_mut_unwrap(otype)
.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_indexes.height,
&source.cumulative.height,
&count_total.cumulative.height,
source.sum.as_array().map(|w| &w.height),
count_total.rolling.sum.as_array().map(|w| &w.height),
exit,
)?;
}
Ok(())
}

View File

@@ -1,5 +1,6 @@
pub(crate) mod algo;
mod amount;
mod by_type_counts;
mod cache_budget;
mod containers;
pub(crate) mod db_utils;
@@ -10,6 +11,7 @@ mod traits;
mod transform;
pub(crate) use amount::*;
pub(crate) use by_type_counts::*;
pub(crate) use cache_budget::*;
pub(crate) use containers::*;
pub(crate) use indexes::*;

View File

@@ -2,7 +2,7 @@ use std::collections::VecDeque;
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, get_percentile};
use brk_types::{Height, VSize, get_percentile, get_weighted_percentile};
use derive_more::{Deref, DerefMut};
use schemars::JsonSchema;
use vecdb::{
@@ -154,6 +154,141 @@ impl<T: NumericValue + JsonSchema> PerBlockDistribution<T> {
Ok(())
}
/// Like `compute_with_skip` but uses vsize-weighted percentiles.
/// Each transaction's contribution to percentile rank is proportional to its vsize.
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_with_skip_weighted<A>(
&mut self,
max_from: Height,
source: &impl ReadableVec<A, T>,
vsize_source: &impl ReadableVec<A, VSize>,
first_indexes: &impl ReadableVec<Height, A>,
count_indexes: &impl ReadableVec<Height, brk_types::StoredU64>,
exit: &Exit,
skip_count: usize,
) -> Result<()>
where
A: VecIndex + VecValue + brk_types::CheckedSub<A>,
{
let DistributionStats {
min,
max,
pct10,
pct25,
median,
pct75,
pct90,
} = &mut self.0;
let min = &mut min.height;
let max = &mut max.height;
let pct10 = &mut pct10.height;
let pct25 = &mut pct25.height;
let median = &mut median.height;
let pct75 = &mut pct75.height;
let pct90 = &mut pct90.height;
let combined_version = source.version()
+ vsize_source.version()
+ first_indexes.version()
+ count_indexes.version();
let mut index = max_from;
for vec in [
&mut *min,
&mut *max,
&mut *median,
&mut *pct10,
&mut *pct25,
&mut *pct75,
&mut *pct90,
] {
vec.validate_computed_version_or_reset(combined_version)?;
index = index.min(Height::from(vec.len()));
}
let start = index.to_usize();
for vec in [
&mut *min,
&mut *max,
&mut *median,
&mut *pct10,
&mut *pct25,
&mut *pct75,
&mut *pct90,
] {
vec.truncate_if_needed_at(start)?;
}
let fi_len = first_indexes.len();
let first_indexes_batch: Vec<A> = first_indexes.collect_range_at(start, fi_len);
let count_indexes_batch: Vec<brk_types::StoredU64> =
count_indexes.collect_range_at(start, fi_len);
let zero = T::from(0_usize);
let mut values: Vec<T> = Vec::new();
let mut vsizes: Vec<VSize> = Vec::new();
let mut weighted: Vec<(T, VSize)> = Vec::new();
first_indexes_batch
.into_iter()
.zip(count_indexes_batch)
.try_for_each(|(first_index, count_index)| -> Result<()> {
let count = u64::from(count_index) as usize;
let effective_count = count.saturating_sub(skip_count);
let effective_first_index = first_index + skip_count.min(count);
let start_at = effective_first_index.to_usize();
let end_at = start_at + effective_count;
source.collect_range_into_at(start_at, end_at, &mut values);
vsize_source.collect_range_into_at(start_at, end_at, &mut vsizes);
weighted.clear();
weighted.extend(
values
.iter()
.copied()
.zip(vsizes.iter().copied())
.filter(|(v, _)| skip_count == 0 || *v > zero),
);
if weighted.is_empty() {
for vec in [
&mut *min,
&mut *max,
&mut *median,
&mut *pct10,
&mut *pct25,
&mut *pct75,
&mut *pct90,
] {
vec.push(zero);
}
} else {
weighted.sort_unstable_by(|a, b| a.0.cmp(&b.0));
max.push(weighted.last().unwrap().0);
pct90.push(get_weighted_percentile(&weighted, 0.90));
pct75.push(get_weighted_percentile(&weighted, 0.75));
median.push(get_weighted_percentile(&weighted, 0.50));
pct25.push(get_weighted_percentile(&weighted, 0.25));
pct10.push(get_weighted_percentile(&weighted, 0.10));
min.push(weighted.first().unwrap().0);
}
Ok(())
})?;
let _lock = exit.lock();
for vec in [min, max, median, pct10, pct25, pct75, pct90] {
vec.write()?;
}
Ok(())
}
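// Sketch of vsize-weighted percentile selection, for intuition only. The
// real `get_weighted_percentile` lives in `brk_types` and may interpolate
// differently; this stand-in picks the first value whose cumulative weight
// reaches `p` of the total. `weighted` must already be sorted by value,
// as it is above.
fn weighted_percentile_sketch(weighted: &[(u64, u64)], p: f64) -> u64 {
    let total: u64 = weighted.iter().map(|&(_, w)| w).sum();
    let target = (total as f64 * p).ceil() as u64;
    let mut acc = 0u64;
    for &(value, weight) in weighted {
        acc += weight;
        if acc >= target {
            return value; // a large-vsize tx pulls the percentile toward its value
        }
    }
    weighted.last().map(|&(v, _)| v).unwrap_or(0)
}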
pub(crate) fn compute_from_nblocks<A>(
&mut self,
max_from: Height,

View File

@@ -0,0 +1,91 @@
//! PercentCumulativeRolling - cumulative percent + 4 rolling window percents.
//!
//! Mirrors `PerBlockCumulativeRolling` but for percentages derived from ratios
//! of cumulative values and rolling sums.
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{Height, Version};
use vecdb::{BinaryTransform, Database, Exit, ReadableVec, Rw, StorageMode, VecValue};
use crate::{
indexes,
internal::{BpsType, PercentPerBlock, PercentRollingWindows},
};
#[derive(Traversable)]
pub struct PercentCumulativeRolling<B: BpsType, M: StorageMode = Rw> {
pub cumulative: PercentPerBlock<B, M>,
#[traversable(flatten)]
pub rolling: PercentRollingWindows<B, M>,
}
impl<B: BpsType> PercentCumulativeRolling<B> {
pub(crate) fn forced_import(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative =
PercentPerBlock::forced_import(db, &format!("{name}_cumulative"), version, indexes)?;
let rolling =
PercentRollingWindows::forced_import(db, &format!("{name}_sum"), version, indexes)?;
Ok(Self {
cumulative,
rolling,
})
}
/// Alternate constructor that uses the same base name for both the
/// cumulative `PercentPerBlock` and the `PercentRollingWindows`, relying on
/// the window suffix to disambiguate. Useful for preserving legacy disk
/// names where the two variants historically shared a prefix.
pub(crate) fn forced_import_flat(
db: &Database,
name: &str,
version: Version,
indexes: &indexes::Vecs,
) -> Result<Self> {
let cumulative = PercentPerBlock::forced_import(db, name, version, indexes)?;
let rolling = PercentRollingWindows::forced_import(db, name, version, indexes)?;
Ok(Self {
cumulative,
rolling,
})
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute_binary<S1T, S2T, F, Rc1, Rc2, Rw1, Rw2>(
&mut self,
max_from: Height,
cumulative_numerator: &Rc1,
cumulative_denominator: &Rc2,
rolling_numerators: [&Rw1; 4],
rolling_denominators: [&Rw2; 4],
exit: &Exit,
) -> Result<()>
where
S1T: VecValue,
S2T: VecValue,
Rc1: ReadableVec<Height, S1T>,
Rc2: ReadableVec<Height, S2T>,
Rw1: ReadableVec<Height, S1T>,
Rw2: ReadableVec<Height, S2T>,
F: BinaryTransform<S1T, S2T, B>,
{
self.cumulative.compute_binary::<S1T, S2T, F>(
max_from,
cumulative_numerator,
cumulative_denominator,
exit,
)?;
self.rolling.compute_binary::<S1T, S2T, F, Rw1, Rw2>(
max_from,
rolling_numerators,
rolling_denominators,
exit,
)?;
Ok(())
}
}
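// For intuition only: the binary transforms plugged into `compute_binary`
// (e.g. `RatioU64Bp16`) reduce to "numerator over denominator in basis
// points". A stand-in, assuming `BasisPoints16` is a u16 where
// 10_000 bp == 100% (names and widths here are assumptions, not the real types):
fn ratio_bp16(numerator: u64, denominator: u64) -> u16 {
    if denominator == 0 {
        return 0; // empty window: report 0% rather than dividing by zero
    }
    ((numerator.saturating_mul(10_000) / denominator).min(u64::from(u16::MAX))) as u16
}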

View File

@@ -1,10 +1,12 @@
mod base;
mod cumulative_rolling;
mod lazy;
mod lazy_windows;
mod vec;
mod windows;
pub use base::*;
pub use cumulative_rolling::*;
pub use lazy::*;
pub use lazy_windows::*;
pub use vec::*;

View File

@@ -2,7 +2,7 @@ use brk_error::Result;
use brk_indexer::Indexer;
use brk_traversable::Traversable;
use brk_types::{Indexes, TxIndex};
use brk_types::{Indexes, TxIndex, VSize};
use schemars::JsonSchema;
use vecdb::{Database, Exit, ReadableVec, Rw, StorageMode, Version};
@@ -113,4 +113,43 @@ where
Ok(())
}
/// Like `derive_from_with_skip` but uses vsize-weighted percentiles for the
/// per-block distribution. The rolling 6-block distribution stays count-based.
#[allow(clippy::too_many_arguments)]
pub(crate) fn derive_from_with_skip_weighted(
&mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
starting_indexes: &Indexes,
tx_index_source: &impl ReadableVec<TxIndex, T>,
vsize_source: &impl ReadableVec<TxIndex, VSize>,
exit: &Exit,
skip_count: usize,
) -> Result<()>
where
T: Copy + Ord + From<f64> + Default,
f64: From<T>,
{
self.block.compute_with_skip_weighted(
starting_indexes.height,
tx_index_source,
vsize_source,
&indexer.vecs.transactions.first_tx_index,
&indexes.height.tx_index_count,
exit,
skip_count,
)?;
self.distribution._6b.compute_from_nblocks(
starting_indexes.height,
tx_index_source,
&indexer.vecs.transactions.first_tx_index,
&indexes.height.tx_index_count,
6,
exit,
)?;
Ok(())
}
}

View File

@@ -6,9 +6,9 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_traversable::Traversable;
use brk_types::{Indexes, TxIndex};
use brk_types::{Indexes, TxIndex, VSize};
use schemars::JsonSchema;
use vecdb::{Database, EagerVec, Exit, ImportableVec, PcoVec, Rw, StorageMode, Version};
use vecdb::{Database, EagerVec, Exit, ImportableVec, PcoVec, ReadableVec, Rw, StorageMode, Version};
use crate::{
indexes,
@@ -65,4 +65,29 @@ where
skip_count,
)
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn derive_from_with_skip_weighted(
&mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
starting_indexes: &Indexes,
vsize_source: &impl ReadableVec<TxIndex, VSize>,
exit: &Exit,
skip_count: usize,
) -> Result<()>
where
T: Copy + Ord + From<f64> + Default,
f64: From<T>,
{
self.distribution.derive_from_with_skip_weighted(
indexer,
indexes,
starting_indexes,
&self.tx_index,
vsize_source,
exit,
skip_count,
)
}
}

View File

@@ -433,6 +433,7 @@ impl Computer {
&self.indexes,
&self.inputs,
&self.outputs,
&self.scripts,
&self.transactions,
&self.blocks,
&self.prices,

View File

@@ -116,16 +116,10 @@ impl Vecs {
.compute(prices, starting_indexes.height, exit)?;
self.fee_dominance
.compute_binary::<Sats, Sats, RatioSatsBp16>(
.compute_binary::<Sats, Sats, RatioSatsBp16, _, _, _, _>(
starting_indexes.height,
&self.fees.cumulative.sats.height,
&self.coinbase.cumulative.sats.height,
exit,
)?;
self.fee_dominance_rolling
.compute_binary::<Sats, Sats, RatioSatsBp16, _, _>(
starting_indexes.height,
self.fees.sum.as_array().map(|w| &w.sats.height),
self.coinbase.sum.as_array().map(|w| &w.sats.height),
exit,

View File

@@ -7,7 +7,7 @@ use crate::{
indexes,
internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, OneMinusBp16, PercentPerBlock, PercentRollingWindows,
LazyPercentRollingWindows, OneMinusBp16, PercentCumulativeRolling, PercentPerBlock,
RatioRollingWindows, WindowStartVec, Windows,
},
};
@@ -19,13 +19,13 @@ impl Vecs {
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
let fee_dominance_rolling =
PercentRollingWindows::forced_import(db, "fee_dominance", version, indexes)?;
let fee_dominance =
PercentCumulativeRolling::forced_import_flat(db, "fee_dominance", version, indexes)?;
let subsidy_dominance_rolling = LazyPercentRollingWindows::from_rolling::<OneMinusBp16>(
"subsidy_dominance",
version,
&fee_dominance_rolling,
&fee_dominance.rolling,
);
Ok(Self {
@@ -51,8 +51,7 @@ impl Vecs {
version,
indexes,
)?,
fee_dominance: PercentPerBlock::forced_import(db, "fee_dominance", version, indexes)?,
fee_dominance_rolling,
fee_dominance,
subsidy_dominance: PercentPerBlock::forced_import(
db,
"subsidy_dominance",

View File

@@ -4,7 +4,7 @@ use vecdb::{EagerVec, PcoVec, Rw, StorageMode};
use crate::internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
LazyPercentRollingWindows, PercentPerBlock, PercentRollingWindows, RatioRollingWindows,
LazyPercentRollingWindows, PercentCumulativeRolling, PercentPerBlock, RatioRollingWindows,
};
#[derive(Traversable)]
@@ -15,9 +15,7 @@ pub struct Vecs<M: StorageMode = Rw> {
pub output_volume: M::Stored<EagerVec<PcoVec<Height, Sats>>>,
pub unclaimed: AmountPerBlockCumulative<M>,
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance_rolling: PercentRollingWindows<BasisPoints16, M>,
pub fee_dominance: PercentCumulativeRolling<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]

View File

@@ -0,0 +1,104 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::{Indexes, StoredU64};
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::Vecs;
use crate::internal::{
PerBlockFull, compute_by_addr_type_block_counts, compute_by_addr_type_tx_percents,
};
impl Vecs {
/// Phase 1: walk outputs and populate `output_count` + `tx_count`.
/// Independent of `transactions`, so it can run alongside other outputs work.
pub(crate) fn compute_counts(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let dep_version = indexer.vecs.outputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txout_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.output_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self
.output_count
.values()
.map(|v| v.block.len())
.min()
.unwrap()
.min(self.tx_count.values().map(|v| v.block.len()).min().unwrap());
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.output_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
for (_, v) in self.tx_count.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txout_len = indexer.vecs.outputs.output_type.len();
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
compute_by_addr_type_block_counts(
&mut self.output_count,
&mut self.tx_count,
&fi_batch,
txid_len,
false,
starting_indexes.height,
exit,
|tx_pos, per_tx| {
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
fo_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txout_len
};
otype_cursor.advance(fo - otype_cursor.position());
for _ in fo..next_fo {
let otype = otype_cursor.next().unwrap();
per_tx[otype as usize] += 1;
}
Ok(())
},
)
}
/// Phase 2: derive `tx_percent` from `tx_count` and the total tx count.
/// Must run after `transactions::Vecs::compute` (depends on tx count totals).
pub(crate) fn compute_percents(
&mut self,
transactions_count_total: &PerBlockFull<StoredU64>,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
compute_by_addr_type_tx_percents(
&self.tx_count,
&mut self.tx_percent,
transactions_count_total,
starting_indexes,
exit,
)
}
}

View File

@@ -0,0 +1,48 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
output_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("{name}_output_count"),
version,
indexes,
cached_starts,
)
})?,
tx_count: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out"),
version,
indexes,
cached_starts,
)
})?,
tx_percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out_rel_to_all"),
version,
indexes,
)
})?,
})
}
}

View File

@@ -0,0 +1,5 @@
mod compute;
mod import;
mod vecs;
pub use vecs::Vecs;

View File

@@ -0,0 +1,16 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
/// Per-block, per-type total output count (granular).
pub output_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-block, per-type count of TXs containing at least one output of this type.
pub tx_count: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
/// Per-type tx_count as a percent of total tx count.
pub tx_percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -11,7 +11,7 @@ use crate::{
},
};
use super::{CountVecs, SpentVecs, Vecs};
use super::{ByTypeVecs, CountVecs, SpentVecs, Vecs};
impl Vecs {
pub(crate) fn forced_import(
@@ -25,8 +25,14 @@ impl Vecs {
let spent = SpentVecs::forced_import(&db, version)?;
let count = CountVecs::forced_import(&db, version, indexes, cached_starts)?;
let by_type = ByTypeVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self { db, spent, count };
let this = Self {
db,
spent,
count,
by_type,
};
finalize_db(&this.db, &this)?;
Ok(this)
}

View File

@@ -1,3 +1,4 @@
pub mod by_type;
pub mod count;
pub mod spent;
@@ -7,6 +8,7 @@ mod import;
use brk_traversable::Traversable;
use vecdb::{Database, Rw, StorageMode};
pub use by_type::Vecs as ByTypeVecs;
pub use count::Vecs as CountVecs;
pub use spent::Vecs as SpentVecs;
@@ -19,4 +21,5 @@ pub struct Vecs<M: StorageMode = Rw> {
pub spent: SpentVecs<M>,
pub count: CountVecs<M>,
pub by_type: ByTypeVecs<M>,
}

View File

@@ -93,6 +93,26 @@ impl Vecs {
)?)
})?;
// addr_output_count = sum of the 8 address-type per-block counts.
// Lives here (not in addr/) because every consumer that asks "what
// fraction of address outputs are X" needs it as the denominator.
self.addr_output_count.block.compute_sum_of_others(
starting_indexes.height,
&[
&self.p2pk65.block,
&self.p2pk33.block,
&self.p2pkh.block,
&self.p2sh.block,
&self.p2wpkh.block,
&self.p2wsh.block,
&self.p2tr.block,
&self.p2a.block,
],
exit,
)?;
self.addr_output_count
.compute_rest(starting_indexes.height, exit)?;
self.op_return.compute(starting_indexes.height, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,

View File

@@ -88,6 +88,13 @@ impl Vecs {
p2tr,
p2wpkh,
p2wsh,
addr_output_count: PerBlockCumulativeRolling::forced_import(
db,
"addr_output_count",
version,
indexes,
cached_starts,
)?,
op_return: PerBlockCumulativeRolling::forced_import(
db,
"op_return_count",

View File

@@ -15,6 +15,9 @@ pub struct Vecs<M: StorageMode = Rw> {
pub p2tr: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub p2wpkh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub p2wsh: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
/// Sum of the 8 address-type per-block counts. Useful as a denominator
/// for any "fraction of address outputs that …" metric.
pub addr_output_count: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub op_return: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub empty_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
pub unknown_output: PerBlockCumulativeRolling<StoredU64, StoredU64, M>,
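// Illustration only, not part of the commit: per block, `addr_output_count`
// is the element-wise sum of the 8 per-type series, so a per-type share is
// a single division against it.
fn addr_output_share(per_type_counts: [u64; 8], which: usize) -> f64 {
    let total: u64 = per_type_counts.iter().sum();
    if total == 0 {
        0.0
    } else {
        per_type_counts[which] as f64 / total as f64
    }
}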

View File

@@ -3,10 +3,9 @@ use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::Exit;
use super::{Vecs, type_counts::compute_type_percents};
use crate::{blocks, indexes, inputs, outputs, prices};
use super::Vecs;
impl Vecs {
#[allow(clippy::too_many_arguments)]
pub(crate) fn compute(
@@ -22,7 +21,7 @@ impl Vecs {
) -> Result<()> {
self.db.sync_bg_tasks()?;
let (r1, (r2, r3)) = rayon::join(
let (r1, (r2, (r3, (r4, r5)))) = rayon::join(
|| {
self.count
.compute(indexer, &blocks.lookback, starting_indexes, exit)
@@ -30,13 +29,56 @@ impl Vecs {
|| {
rayon::join(
|| self.versions.compute(indexer, starting_indexes, exit),
|| self.size.compute(indexer, indexes, starting_indexes, exit),
|| {
rayon::join(
|| self.size.compute(indexer, indexes, starting_indexes, exit),
|| {
rayon::join(
|| {
self.input_types
.compute(indexer, starting_indexes, exit)
},
|| {
self.output_types
.compute(indexer, starting_indexes, exit)
},
)
},
)
},
)
},
);
r1?;
r2?;
r3?;
r4?;
r5?;
let count_total = &self.count.total;
let (input_types, output_types) = (&mut self.input_types, &mut self.output_types);
let (r6, r7) = rayon::join(
|| {
compute_type_percents(
&input_types.by_type,
&mut input_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
|| {
compute_type_percents(
&output_types.by_type,
&mut output_types.percent,
count_total,
starting_indexes.height,
exit,
)
},
);
r6?;
r7?;
self.fees.compute(
indexer,

View File

@@ -35,37 +35,25 @@ impl Vecs {
self.compute_fees(indexer, indexes, size_vecs, starting_indexes, exit)?;
let (r1, (r2, r3)) = rayon::join(
let vsize_source = &size_vecs.vsize.tx_index;
let (r1, r2) = rayon::join(
|| {
self.fee
.derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1)
},
|| {
rayon::join(
|| {
self.fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
|| {
self.effective_fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
self.effective_fee_rate.derive_from_with_skip_weighted(
indexer,
indexes,
starting_indexes,
vsize_source,
exit,
1,
)
},
);
r1?;
r2?;
r3?;
Ok(())
}
@@ -86,7 +74,6 @@ impl Vecs {
.tx_index
.validate_computed_version_or_reset(dep_version)?;
self.fee_rate
.tx_index
.validate_computed_version_or_reset(dep_version)?;
self.effective_fee_rate
.tx_index
@@ -101,7 +88,7 @@ impl Vecs {
.fee
.tx_index
.len()
.min(self.fee_rate.tx_index.len())
.min(self.fee_rate.len())
.min(self.effective_fee_rate.tx_index.len())
.min(starting_indexes.tx_index.to_usize());
@@ -113,7 +100,6 @@ impl Vecs {
.tx_index
.truncate_if_needed(starting_indexes.tx_index)?;
self.fee_rate
.tx_index
.truncate_if_needed(starting_indexes.tx_index)?;
self.effective_fee_rate
.tx_index
@@ -185,7 +171,7 @@ impl Vecs {
input_values[j] - output_values[j]
};
self.fee.tx_index.push(fee);
self.fee_rate.tx_index.push(FeeRate::from((fee, vsizes[j])));
self.fee_rate.push(FeeRate::from((fee, vsizes[j])));
fees.push(fee);
}
@@ -205,14 +191,14 @@ impl Vecs {
if h % 1_000 == 0 {
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.fee_rate.write()?;
self.effective_fee_rate.tx_index.write()?;
}
}
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.fee_rate.write()?;
self.effective_fee_rate.tx_index.write()?;
Ok(())

View File

@@ -19,7 +19,7 @@ impl Vecs {
input_value: EagerVec::forced_import(db, "input_value", version)?,
output_value: EagerVec::forced_import(db, "output_value", version)?,
fee: PerTxDistribution::forced_import(db, "fee", v, indexes)?,
fee_rate: PerTxDistribution::forced_import(db, "fee_rate", v, indexes)?,
fee_rate: EagerVec::forced_import(db, "fee_rate", v)?,
effective_fee_rate: PerTxDistribution::forced_import(
db,
"effective_fee_rate",

View File

@@ -9,6 +9,6 @@ pub struct Vecs<M: StorageMode = Rw> {
pub input_value: M::Stored<EagerVec<PcoVec<TxIndex, Sats>>>,
pub output_value: M::Stored<EagerVec<PcoVec<TxIndex, Sats>>>,
pub fee: PerTxDistribution<Sats, M>,
pub fee_rate: PerTxDistribution<FeeRate, M>,
pub fee_rate: M::Stored<EagerVec<PcoVec<TxIndex, FeeRate>>>,
pub effective_fee_rate: PerTxDistribution<FeeRate, M>,
}

View File

@@ -12,7 +12,9 @@ use crate::{
},
};
use super::{CountVecs, FeesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs};
use super::{
CountVecs, FeesVecs, InputTypesVecs, OutputTypesVecs, SizeVecs, Vecs, VersionsVecs, VolumeVecs,
};
impl Vecs {
pub(crate) fn forced_import(
@@ -30,6 +32,8 @@ impl Vecs {
let fees = FeesVecs::forced_import(&db, version, indexes)?;
let versions = VersionsVecs::forced_import(&db, version, indexes, cached_starts)?;
let volume = VolumeVecs::forced_import(&db, version, indexes, cached_starts)?;
let input_types = InputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let output_types = OutputTypesVecs::forced_import(&db, version, indexes, cached_starts)?;
let this = Self {
db,
@@ -38,6 +42,8 @@ impl Vecs {
fees,
versions,
volume,
input_types,
output_types,
};
finalize_db(&this.db, &this)?;
Ok(this)

View File

@@ -0,0 +1,68 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let dep_version = indexer.vecs.inputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txin_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.by_type.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txin_len = indexer.vecs.inputs.output_type.len();
let mut itype_cursor = indexer.vecs.inputs.output_type.cursor();
let mut fi_in_cursor = indexer.vecs.transactions.first_txin_index.cursor();
compute_type_counts(
&mut self.by_type,
&fi_batch,
txid_len,
true,
starting_indexes.height,
exit,
|tx_pos| {
let fi_in = fi_in_cursor.get(tx_pos).data()?.to_usize();
let next_fi_in = if tx_pos + 1 < txid_len {
fi_in_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txin_len
};
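// One bit per OutputType discriminant: bit i is set iff this tx spends
// at least one output of type i, so each type counts once per tx.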
let mut seen: u16 = 0;
itype_cursor.advance(fi_in - itype_cursor.position());
for _ in fi_in..next_fi_in {
seen |= 1u16 << (itype_cursor.next().unwrap() as u8);
}
Ok(seen)
},
)
}
}

View File

@@ -0,0 +1,39 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
by_type: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in"),
version,
indexes,
cached_starts,
)
})?,
percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_in_rel_to_all"),
version,
indexes,
)
})?,
})
}
}

View File

@@ -0,0 +1,5 @@
mod compute;
mod import;
mod vecs;
pub use vecs::Vecs;

View File

@@ -0,0 +1,12 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -1,9 +1,12 @@
pub mod count;
pub mod fees;
pub mod input_types;
pub mod output_types;
pub mod size;
pub mod versions;
pub mod volume;
mod type_counts;
mod compute;
mod import;
@@ -12,6 +15,8 @@ use vecdb::{Database, Rw, StorageMode};
pub use count::Vecs as CountVecs;
pub use fees::Vecs as FeesVecs;
pub use input_types::Vecs as InputTypesVecs;
pub use output_types::Vecs as OutputTypesVecs;
pub use size::Vecs as SizeVecs;
pub use versions::Vecs as VersionsVecs;
pub use volume::Vecs as VolumeVecs;
@@ -28,4 +33,6 @@ pub struct Vecs<M: StorageMode = Rw> {
pub fees: FeesVecs<M>,
pub versions: VersionsVecs<M>,
pub volume: VolumeVecs<M>,
pub input_types: InputTypesVecs<M>,
pub output_types: OutputTypesVecs<M>,
}

View File

@@ -0,0 +1,68 @@
use brk_error::{OptionData, Result};
use brk_indexer::Indexer;
use brk_types::Indexes;
use vecdb::{AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use super::{super::type_counts::compute_type_counts, Vecs};
impl Vecs {
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let dep_version = indexer.vecs.outputs.output_type.version()
+ indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.first_txout_index.version()
+ indexer.vecs.transactions.txid.version();
for (_, v) in self.by_type.iter_mut() {
v.block
.validate_and_truncate(dep_version, starting_indexes.height)?;
}
let skip = self.by_type.values().map(|v| v.block.len()).min().unwrap();
let first_tx_index = &indexer.vecs.transactions.first_tx_index;
let end = first_tx_index.len();
if skip >= end {
return Ok(());
}
for (_, v) in self.by_type.iter_mut() {
v.block.truncate_if_needed_at(skip)?;
}
let fi_batch = first_tx_index.collect_range_at(skip, end);
let txid_len = indexer.vecs.transactions.txid.len();
let total_txout_len = indexer.vecs.outputs.output_type.len();
let mut otype_cursor = indexer.vecs.outputs.output_type.cursor();
let mut fo_cursor = indexer.vecs.transactions.first_txout_index.cursor();
compute_type_counts(
&mut self.by_type,
&fi_batch,
txid_len,
false,
starting_indexes.height,
exit,
|tx_pos| {
let fo = fo_cursor.get(tx_pos).data()?.to_usize();
let next_fo = if tx_pos + 1 < txid_len {
fo_cursor.get(tx_pos + 1).data()?.to_usize()
} else {
total_txout_len
};
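// One bit per OutputType discriminant: bit i is set iff this tx creates
// at least one output of type i, so each type counts once per tx.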
let mut seen: u16 = 0;
otype_cursor.advance(fo - otype_cursor.position());
for _ in fo..next_fo {
seen |= 1u16 << (otype_cursor.next().unwrap() as u8);
}
Ok(seen)
},
)
}
}

View File

@@ -0,0 +1,39 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use super::Vecs;
use crate::{
indexes,
internal::{PerBlockCumulativeRolling, PercentCumulativeRolling, WindowStartVec, Windows},
};
impl Vecs {
pub(crate) fn forced_import(
db: &Database,
version: Version,
indexes: &indexes::Vecs,
cached_starts: &Windows<&WindowStartVec>,
) -> Result<Self> {
Ok(Self {
by_type: ByAddrType::new_with_name(|name| {
PerBlockCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out"),
version,
indexes,
cached_starts,
)
})?,
percent: ByAddrType::new_with_name(|name| {
PercentCumulativeRolling::forced_import(
db,
&format!("tx_count_with_{name}_out_rel_to_all"),
version,
indexes,
)
})?,
})
}
}

View File

@@ -0,0 +1,5 @@
mod compute;
mod import;
mod vecs;
pub use vecs::Vecs;

View File

@@ -0,0 +1,12 @@
use brk_cohort::ByAddrType;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, StoredU64};
use vecdb::{Rw, StorageMode};
use crate::internal::{PerBlockCumulativeRolling, PercentCumulativeRolling};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub by_type: ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64, M>>,
pub percent: ByAddrType<PercentCumulativeRolling<BasisPoints16, M>>,
}

View File

@@ -0,0 +1,91 @@
use brk_cohort::ByAddrType;
use brk_error::Result;
use brk_types::{BasisPoints16, Height, OutputType, StoredU64, TxIndex};
use vecdb::{AnyStoredVec, Exit, VecIndex, WritableVec};
use crate::internal::{
PerBlockCumulativeRolling, PerBlockFull, PercentCumulativeRolling, RatioU64Bp16,
};
pub(super) fn compute_type_counts(
by_type: &mut ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
fi_batch: &[TxIndex],
txid_len: usize,
skip_first_tx: bool,
starting_height: Height,
exit: &Exit,
mut scan_tx: impl FnMut(usize) -> Result<u16>,
) -> Result<()> {
for (j, first_tx) in fi_batch.iter().enumerate() {
let fi = first_tx.to_usize();
let next_fi = fi_batch
.get(j + 1)
.map(|v| v.to_usize())
.unwrap_or(txid_len);
let start_tx = if skip_first_tx { fi + 1 } else { fi };
let mut counts = [0u64; 12];
for tx_pos in start_tx..next_fi {
let seen = scan_tx(tx_pos)?;
let mut bits = seen;
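// Kernighan's trick: `bits &= bits - 1` clears the lowest set bit, so the
// loop body runs exactly once per distinct type present in the tx.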
while bits != 0 {
let idx = bits.trailing_zeros() as usize;
counts[idx] += 1;
bits &= bits - 1;
}
}
for otype in OutputType::ADDR_TYPES {
by_type
.get_mut_unwrap(otype)
.block
.push(StoredU64::from(counts[otype as usize]));
}
if by_type.p2pkh.block.batch_limit_reached() {
let _lock = exit.lock();
for (_, v) in by_type.iter_mut() {
v.block.write()?;
}
}
}
{
let _lock = exit.lock();
for (_, v) in by_type.iter_mut() {
v.block.write()?;
}
}
for (_, v) in by_type.iter_mut() {
v.compute_rest(starting_height, exit)?;
}
Ok(())
}
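// Toy end-to-end of the mask scheme above, illustration only (assumes type
// ids < 12): duplicates within a tx set the same bit, so each tx bumps a
// type's counter at most once.
fn count_tx_types(per_tx_type_ids: &[Vec<u8>]) -> [u64; 12] {
    let mut counts = [0u64; 12];
    for type_ids in per_tx_type_ids {
        let mut seen: u16 = 0;
        for &t in type_ids {
            seen |= 1u16 << t; // presence, not multiplicity
        }
        let mut bits = seen;
        while bits != 0 {
            counts[bits.trailing_zeros() as usize] += 1;
            bits &= bits - 1;
        }
    }
    counts
}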
pub(super) fn compute_type_percents(
by_type: &ByAddrType<PerBlockCumulativeRolling<StoredU64, StoredU64>>,
percent: &mut ByAddrType<PercentCumulativeRolling<BasisPoints16>>,
count_total: &PerBlockFull<StoredU64>,
starting_height: Height,
exit: &Exit,
) -> Result<()> {
for otype in OutputType::ADDR_TYPES {
let source = by_type.get_unwrap(otype);
percent
.get_mut_unwrap(otype)
.compute_binary::<StoredU64, StoredU64, RatioU64Bp16, _, _, _, _>(
starting_height,
&source.cumulative.height,
&count_total.cumulative.height,
source.sum.as_array().map(|w| &w.height),
count_total.rolling.sum.as_array().map(|w| &w.height),
exit,
)?;
}
Ok(())
}

View File

@@ -21,3 +21,4 @@ tracing = { workspace = true }
parking_lot = { workspace = true }
rayon = { workspace = true }
rlimit = "0.11.0"
rustc-hash = { workspace = true }

View File

@@ -0,0 +1,198 @@
//! End-to-end benchmark: `Reader::after` (rayon-parallel + reorder thread)
//! versus `Reader::after_canonical` (1 reader + N parser threads + canonical
//! hash filter).
//!
//! Two phases:
//!
//! 1. **Tail scenarios** — pick an anchor `N` blocks below the chain tip
//! and run each implementation `REPEATS` times. Exercises the tail
//! (≤10) and forward (>10) code paths under realistic catch-up ranges.
//! 2. **Full reindex** — anchor=`None` (genesis to tip), one run per
//! config. Exercises every blk file once and shows steady-state
//! throughput on the densest possible workload.
//!
//! Run with:
//! cargo run --release -p brk_reader --example after_bench
//!
//! Requires a running bitcoind with a cookie file at the default path.
use std::time::{Duration, Instant};
use brk_error::Result;
use brk_reader::{Reader, Receiver};
use brk_rpc::{Auth, Client};
use brk_types::{BlockHash, Height, ReadBlock};
const SCENARIOS: &[usize] = &[5, 10, 100, 1_000, 10_000];
const REPEATS: usize = 3;
fn main() -> Result<()> {
let bitcoin_dir = Client::default_bitcoin_path();
let client = Client::new(
Client::default_url(),
Auth::CookieFile(bitcoin_dir.join(".cookie")),
)?;
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let tip = client.get_last_height()?;
println!("Tip: {tip}");
println!();
println!(
"{:>10} {:>16} {:>12} {:>12} {:>10}",
"blocks", "impl", "best", "avg", "blk/s"
);
println!("{}", "-".repeat(68));
for &n in SCENARIOS {
let anchor_height = Height::from(tip.saturating_sub(n as u32));
let anchor_hash = client.get_block_hash(*anchor_height as u64)?;
let anchor = Some(BlockHash::from(anchor_hash));
let after = bench(REPEATS, || reader.after(anchor.clone()))?;
print_row(n, "after", &after);
let canonical_1 = bench(REPEATS, || reader.after_canonical(anchor.clone()))?;
print_row(n, "canonical[p=1]", &canonical_1);
let canonical_4 =
bench(REPEATS, || reader.after_canonical_with(anchor.clone(), 4))?;
print_row(n, "canonical[p=4]", &canonical_4);
let canonical_16 =
bench(REPEATS, || reader.after_canonical_with(anchor.clone(), 16))?;
print_row(n, "canonical[p=16]", &canonical_16);
sanity_check(n, &after, &canonical_1);
sanity_check(n, &after, &canonical_4);
sanity_check(n, &after, &canonical_16);
println!();
}
println!();
println!("Full reindex (genesis → tip), one run per config:");
println!(
"{:>10} {:>16} {:>12} {:>10}",
"blocks", "impl", "elapsed", "blk/s"
);
println!("{}", "-".repeat(54));
let after_full = run_once(|| reader.after(None))?;
print_full_row("after", &after_full);
let p1_full = run_once(|| reader.after_canonical(None))?;
print_full_row("canonical[p=1]", &p1_full);
sanity_check_full(&after_full, &p1_full);
let p4_full = run_once(|| reader.after_canonical_with(None, 4))?;
print_full_row("canonical[p=4]", &p4_full);
sanity_check_full(&after_full, &p4_full);
let p16_full = run_once(|| reader.after_canonical_with(None, 16))?;
print_full_row("canonical[p=16]", &p16_full);
sanity_check_full(&after_full, &p16_full);
Ok(())
}
struct RunStats {
best: Duration,
avg: Duration,
count: usize,
}
fn bench<F>(repeats: usize, mut f: F) -> Result<RunStats>
where
F: FnMut() -> Result<Receiver<ReadBlock>>,
{
let mut best = Duration::MAX;
let mut total = Duration::ZERO;
let mut count = 0;
for _ in 0..repeats {
let start = Instant::now();
let recv = f()?;
let mut n = 0;
for block in recv.iter() {
std::hint::black_box(block.height());
n += 1;
}
let elapsed = start.elapsed();
if elapsed < best {
best = elapsed;
}
total += elapsed;
count = n;
}
Ok(RunStats {
best,
avg: total / repeats as u32,
count,
})
}
fn print_row(requested: usize, label: &str, s: &RunStats) {
let blk_per_s = if s.best.is_zero() {
0.0
} else {
s.count as f64 / s.best.as_secs_f64()
};
println!(
"{:>10} {:>16} {:>12?} {:>12?} {:>10.0}",
requested, label, s.best, s.avg, blk_per_s
);
}
fn sanity_check(requested: usize, after: &RunStats, canonical: &RunStats) {
if after.count != canonical.count {
println!(
" ⚠ block count mismatch: after={} canonical={}",
after.count, canonical.count
);
} else if after.count != requested {
println!(
" (note: got {} blocks, requested {}; tip may have advanced)",
after.count, requested
);
}
}
struct FullRun {
elapsed: Duration,
count: usize,
}
fn run_once<F>(mut f: F) -> Result<FullRun>
where
F: FnMut() -> Result<Receiver<ReadBlock>>,
{
let start = Instant::now();
let recv = f()?;
let mut count = 0;
for block in recv.iter() {
std::hint::black_box(block.height());
count += 1;
}
Ok(FullRun {
elapsed: start.elapsed(),
count,
})
}
fn print_full_row(label: &str, run: &FullRun) {
let blk_per_s = if run.elapsed.is_zero() {
0.0
} else {
run.count as f64 / run.elapsed.as_secs_f64()
};
println!(
"{:>10} {:>16} {:>12?} {:>10.0}",
run.count, label, run.elapsed, blk_per_s
);
}
fn sanity_check_full(after: &FullRun, canonical: &FullRun) {
if after.count != canonical.count {
println!(
" ⚠ block count mismatch vs after: {} vs {}",
after.count, canonical.count
);
}
}

View File

@@ -0,0 +1,571 @@
//! Canonical-hash pipeline for `Reader::after`.
//!
//! Three pieces, each with one job:
//!
//! * **`CanonicalRange::walk`** is the only place bitcoind is consulted
//! about the main chain. It batch-fetches every canonical hash in the
//! target window once, up front, via `getblockhash` JSON-RPC batching.
//! * **`parse_canonical_block`** is a pure function of raw blk bytes.
//! It XOR-decodes only the 80-byte header, looks the hash up in the
//! pre-fetched `CanonicalRange`, and short-circuits orphans before
//! touching the (expensive) transaction body. No RPC, no `confirmations`
//! filter, no chain logic.
//! * **`pipeline_forward` / `pipeline_tail`** wire the scan loop to a
//! parser pool. The forward pipeline runs 1 reader + N parser threads
//! (default `N = 1`, configurable via `after_canonical_with`); the
//! tail pipeline (≤10 blocks) stays inline on a single thread because
//! channel/lock overhead would dominate.
//!
//! Coexists with the original `read`/`read_rev`/`after` so the two can be
//! A/B-tested from the indexer.
use std::{
fs::{self, File},
io::{Cursor, Read, Seek, SeekFrom},
ops::ControlFlow,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
thread,
};
use bitcoin::{Transaction, VarInt, block::Header, consensus::Decodable};
use brk_error::{Error, Result};
use brk_rpc::Client;
use brk_types::{BlkMetadata, Block, BlockHash, BlockHashPrefix, Height, ReadBlock};
use crossbeam::channel::{Receiver, Sender, bounded};
use parking_lot::Mutex;
use rustc_hash::FxHashMap;
use tracing::{error, warn};
use crate::{
BlkIndexToBlkPath, ReaderInner, XORBytes, XORIndex,
scan::{ScanResult, scan_bytes},
};
const BOUND_CAP: usize = 50;
const TAIL_CHUNK: usize = 5 * 1024 * 1024;
/// Up to this many canonical blocks → tail pipeline. Beyond → forward.
const TAIL_THRESHOLD: usize = 10;
/// Default parser-thread count for `after_canonical`. The indexer is
/// CPU-bound on the consumer side, so 1 parser thread + 1 reader thread
/// (= 2 total) leaves the rest of the cores for the indexer. Bench tools
/// that drain the channel cheaply can override via `after_canonical_with`.
const DEFAULT_PARSER_THREADS: usize = 1;
// ─────────────────────────────────────────────────────────────────────────────
// CanonicalRange — the only RPC-aware piece in this file.
// ─────────────────────────────────────────────────────────────────────────────
/// Forward-ordered canonical hashes for `start..=end`, resolved once up front.
///
/// `hashes[i]` is the canonical block hash at height `start + i`.
/// `by_prefix` maps the 8-byte `BlockHashPrefix` of every canonical hash to
/// its offset — same prefix-keyed scheme brk already uses in `stores`.
/// Lookups verify the full hash via `hashes[offset]`, neutralising the
/// (astronomically small) prefix collision risk at zero extra cost.
pub struct CanonicalRange {
pub start: Height,
pub end: Height,
hashes: Vec<BlockHash>,
by_prefix: FxHashMap<BlockHashPrefix, u32>,
}
impl CanonicalRange {
/// Resolves canonical hashes for every height strictly after `anchor`
/// up to `tip` inclusive. If `anchor` is `None`, starts at genesis.
///
/// Uses `get_block_hash(h)` which is a deterministic height → canonical
/// hash lookup — no race window against in-progress reorgs.
pub fn walk(client: &Client, anchor: Option<BlockHash>, tip: Height) -> Result<Self> {
let start = match anchor {
Some(hash) => {
let info = client.get_block_header_info(&hash)?;
Height::from(info.height + 1)
}
None => Height::ZERO,
};
if start > tip {
return Ok(Self::empty(start));
}
let len = (*tip - *start + 1) as usize;
let hashes = client.get_block_hashes_range(*start, *tip)?;
let mut by_prefix = FxHashMap::with_capacity_and_hasher(len, Default::default());
for (offset, hash) in hashes.iter().enumerate() {
by_prefix.insert(BlockHashPrefix::from(hash), offset as u32);
}
Ok(Self {
start,
end: tip,
hashes,
by_prefix,
})
}
fn empty(start: Height) -> Self {
Self {
start,
end: start,
hashes: Vec::new(),
by_prefix: FxHashMap::default(),
}
}
pub fn len(&self) -> usize {
self.hashes.len()
}
pub fn is_empty(&self) -> bool {
self.hashes.is_empty()
}
/// Returns the offset-from-start of `hash` iff it matches a canonical
/// block in this range. A prefix hit is verified against the stored
/// full hash to rule out the (vanishing) chance of prefix collisions
/// from unrelated orphans in blk files.
#[inline]
fn offset_of(&self, hash: &BlockHash) -> Option<u32> {
let offset = *self.by_prefix.get(&BlockHashPrefix::from(hash))?;
(self.hashes[offset as usize] == *hash).then_some(offset)
}
}
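// The verify-on-hit pattern behind `offset_of`, reduced to std types for
// illustration (the 8-byte prefix width matches `BlockHashPrefix`; the rest
// is assumed): a prefix hit alone is not membership, so the candidate is
// confirmed against the full 32-byte hash before being trusted.
fn lookup(
    by_prefix: &std::collections::HashMap<[u8; 8], u32>,
    hashes: &[[u8; 32]],
    hash: &[u8; 32],
) -> Option<u32> {
    let prefix: [u8; 8] = hash[..8].try_into().ok()?;
    let offset = *by_prefix.get(&prefix)?;
    // Reject prefix collisions from unrelated (e.g. orphaned) blocks.
    (hashes[offset as usize] == *hash).then_some(offset)
}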
// ─────────────────────────────────────────────────────────────────────────────
// Pure block parser — no client, no confirmations, no Ok(None) on RPC errors.
// ─────────────────────────────────────────────────────────────────────────────
const HEADER_LEN: usize = 80;
/// XOR-decode just the 80-byte header, compute the block hash, look it
/// up in `canonical`, and only proceed to parse the body and transactions
/// when the block is on the canonical chain. Returning early before the
/// body decode is what lets a single parser thread keep up with the
/// 4-thread `read()` pool on sparse ranges.
///
/// Returns `Ok(None)` for orphans / out-of-range blocks. Deterministic —
/// never touches RPC.
fn parse_canonical_block(
mut bytes: Vec<u8>,
metadata: BlkMetadata,
mut xor_i: XORIndex,
xor_bytes: XORBytes,
canonical: &CanonicalRange,
) -> Result<Option<(u32, ReadBlock)>> {
if bytes.len() < HEADER_LEN {
return Err(Error::Internal("Block bytes shorter than header"));
}
// Decode just the header and look the hash up before paying for the
// body. `xor_i` advances `HEADER_LEN` here so it stays in lock-step
// with the decoded prefix.
xor_i.bytes(&mut bytes[..HEADER_LEN], xor_bytes);
let header = Header::consensus_decode(&mut &bytes[..HEADER_LEN])?;
let bitcoin_hash = header.block_hash();
let Some(offset) = canonical.offset_of(&BlockHash::from(bitcoin_hash)) else {
return Ok(None);
};
// Canonical: XOR-decode the body and parse transactions.
xor_i.bytes(&mut bytes[HEADER_LEN..], xor_bytes);
let mut cursor = Cursor::new(bytes);
cursor.set_position(HEADER_LEN as u64);
let tx_count = VarInt::consensus_decode(&mut cursor)?.0 as usize;
let mut txdata = Vec::with_capacity(tx_count);
let mut tx_metadata = Vec::with_capacity(tx_count);
let mut tx_offsets = Vec::with_capacity(tx_count);
for _ in 0..tx_count {
let off = cursor.position() as u32;
tx_offsets.push(off);
let position = metadata.position() + off;
let tx = Transaction::consensus_decode(&mut cursor)?;
txdata.push(tx);
let len = cursor.position() as u32 - off;
tx_metadata.push(BlkMetadata::new(position, len));
}
let raw_bytes = cursor.into_inner();
let height = Height::from(*canonical.start + offset);
let mut block = Block::from((height, bitcoin_hash, bitcoin::Block { header, txdata }));
block.set_raw_data(raw_bytes, tx_offsets);
Ok(Some((offset, ReadBlock::from((block, metadata, tx_metadata)))))
}
// ─────────────────────────────────────────────────────────────────────────────
// Public entry — drop-in replacement for `Reader::after`.
// ─────────────────────────────────────────────────────────────────────────────
impl ReaderInner {
/// Stream every canonical block strictly after `hash` (or from
/// genesis if `None`) up to the current chain tip, in canonical
/// order, via the canonical-hash pipeline.
///
/// Uses the default parser-thread count (`1`); see
/// `after_canonical_with` to override.
pub fn after_canonical(&self, hash: Option<BlockHash>) -> Result<Receiver<ReadBlock>> {
self.after_canonical_with(hash, DEFAULT_PARSER_THREADS)
}
/// Same as `after_canonical` but with a configurable number of parser
/// threads. `parser_threads = 1` is the minimal-thread default
/// (1 reader + 1 parser, uncontended mutex hot path). Higher values
/// trade extra cores for throughput on dense ranges where the parser
/// is the bottleneck.
pub fn after_canonical_with(
&self,
hash: Option<BlockHash>,
parser_threads: usize,
) -> Result<Receiver<ReadBlock>> {
let parser_threads = parser_threads.max(1);
let tip = self.client.get_last_height()?;
let canonical = Arc::new(CanonicalRange::walk(&self.client, hash, tip)?);
if canonical.is_empty() {
return Ok(bounded(0).1);
}
// Refresh the blk path cache once, on the caller's thread, so the
// worker thread below has a stable view.
let paths = BlkIndexToBlkPath::scan(&self.blocks_dir);
*self.blk_index_to_blk_path.write() = paths.clone();
let (send, recv) = bounded(BOUND_CAP);
let xor_bytes = self.xor_bytes;
if canonical.len() <= TAIL_THRESHOLD {
thread::spawn(move || {
if let Err(e) = pipeline_tail(&paths, xor_bytes, &canonical, &send) {
error!("after_canonical tail pipeline failed: {e}");
}
});
} else {
let first_blk_index = self
.find_start_blk_index(Some(canonical.start), &paths, xor_bytes)
.unwrap_or_default();
thread::spawn(move || {
if let Err(e) = pipeline_forward(
&paths,
first_blk_index,
xor_bytes,
canonical,
&send,
parser_threads,
) {
error!("after_canonical forward pipeline failed: {e}");
}
});
}
Ok(recv)
}
}
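// Hedged usage sketch (not in the original): `reader` stands in for a
// constructed `ReaderInner`, `anchor_hash` for the last block already
// indexed. The receiver yields blocks in canonical order and ends when
// the pipeline thread finishes or errors.
//
//     let recv = reader.after_canonical(Some(anchor_hash))?;
//     for block in recv {
//         index(block); // hypothetical consumer
//     }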
// ─────────────────────────────────────────────────────────────────────────────
// Forward pipeline — 1 reader + N parsers + shared in-order emission.
// ─────────────────────────────────────────────────────────────────────────────
/// Item shipped from the reader thread to the parser pool: raw block
/// bytes, blk-file metadata, and the XOR state at the offset where those
/// bytes start.
type ScannedItem = (BlkMetadata, Vec<u8>, XORIndex);
/// Shared in-order emission buffer used by N parser threads. The mutex
/// is uncontended at `parser_threads = 1` (still acquired, never blocks).
struct ReorderState {
next_offset: u32,
target_len: u32,
/// Ahead-of-line matches keyed by canonical offset; drained
/// contiguously each time `next_offset` advances.
pending: FxHashMap<u32, ReadBlock>,
send_to_consumer: Sender<ReadBlock>,
}
impl ReorderState {
fn new(send_to_consumer: Sender<ReadBlock>, target_len: u32) -> Self {
Self {
next_offset: 0,
target_len,
pending: FxHashMap::default(),
send_to_consumer,
}
}
/// Insert a parsed canonical block. Returns `false` once the pipeline
/// is done — either the consumer dropped the receiver, every canonical
/// block has been emitted, or a parser somehow produced a duplicate
/// offset — so the caller should stop processing and exit.
fn try_emit(&mut self, offset: u32, block: ReadBlock) -> bool {
use std::cmp::Ordering::*;
match offset.cmp(&self.next_offset) {
Equal => {
if self.send_to_consumer.send(block).is_err() {
return false;
}
self.next_offset += 1;
while let Some(b) = self.pending.remove(&self.next_offset) {
if self.send_to_consumer.send(b).is_err() {
return false;
}
self.next_offset += 1;
}
self.next_offset < self.target_len
}
Greater => {
self.pending.insert(offset, block);
true
}
// Each canonical hash appears at exactly one offset, and
// each block is parsed once, so a parser should never
// produce an offset below `next_offset`. Treat as done.
Less => false,
}
}
}
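// Hedged test-style sketch (not in the original): `block_at` is a
// hypothetical fixture producing a `ReadBlock` for a given offset. It
// exercises the contract above: ahead-of-line offsets park in `pending`,
// offset 0 triggers a contiguous drain, and `try_emit` signals completion
// by returning `false` once `target_len` blocks have been emitted.
#[cfg(test)]
mod reorder_sketch {
    use super::*;

    #[test]
    fn drains_pending_in_order() {
        let (send, recv) = bounded(8);
        let mut state = ReorderState::new(send, 3);
        assert!(state.try_emit(2, block_at(2))); // parked in `pending`
        assert!(state.try_emit(1, block_at(1))); // parked in `pending`
        assert!(recv.is_empty()); // nothing emitted yet
        assert!(!state.try_emit(0, block_at(0))); // 0, 1, 2 emitted; done
        assert_eq!(recv.try_iter().count(), 3);
    }
}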
/// Two-stage pipeline:
///
/// 1. **Reader (this thread)** — walks blk files from `first_blk_index`,
/// `fs::read`s each one, runs `scan_bytes` to locate every block, and
/// ships `ScannedItem`s over an mpmc channel to the parser pool.
/// 2. **Parser pool** — `parser_threads` workers draining the same
/// channel. Each worker runs `parse_canonical_block` (header first,
/// body only on canonical match) and acquires the shared `ReorderState`
/// mutex to insert into the in-order emission buffer.
///
/// Canonical blocks can arrive out of order across blk files (bitcoind
/// doesn't write in strict chain order during initial sync, headers-first
/// body fetch, or reindex), so the reorder buffer is required even with
/// a single parser thread.
fn pipeline_forward(
paths: &BlkIndexToBlkPath,
first_blk_index: u16,
xor_bytes: XORBytes,
canonical: Arc<CanonicalRange>,
send: &Sender<ReadBlock>,
parser_threads: usize,
) -> Result<()> {
let (parser_send, parser_recv) = bounded::<ScannedItem>(BOUND_CAP);
let reorder = Arc::new(Mutex::new(ReorderState::new(
send.clone(),
canonical.len() as u32,
)));
// Set when the pipeline is finished (consumer dropped or all canonical
// blocks emitted) so parsers can short-circuit instead of burning CPU
// on doomed work while the in-flight queue drains.
let done = Arc::new(AtomicBool::new(false));
let parsers = spawn_parser_pool(
parser_threads,
&parser_recv,
&reorder,
&done,
&canonical,
xor_bytes,
);
drop(parser_recv); // parsers own clones; this would otherwise keep the channel open
let read_result = read_and_dispatch(paths, first_blk_index, xor_bytes, &parser_send, &done);
drop(parser_send); // signal end-of-input to parsers
for parser in parsers {
parser
.join()
.map_err(|_| Error::Internal("parser thread panicked"))??;
}
read_result?;
let state = reorder.lock();
if (state.next_offset as usize) < canonical.len() && !done.load(Ordering::Relaxed) {
return Err(Error::Internal(
"after_canonical forward pipeline: blk files missing canonical blocks",
));
}
Ok(())
}
/// Spawn `n` parser threads that drain `parser_recv`, parse each scanned
/// item via `parse_canonical_block`, and emit canonical matches to
/// `reorder`. Parsers exit when the channel closes; once `done` is set
/// they keep draining items but skip the parse work.
fn spawn_parser_pool(
n: usize,
parser_recv: &Receiver<ScannedItem>,
reorder: &Arc<Mutex<ReorderState>>,
done: &Arc<AtomicBool>,
canonical: &Arc<CanonicalRange>,
xor_bytes: XORBytes,
) -> Vec<thread::JoinHandle<Result<()>>> {
(0..n)
.map(|_| {
let parser_recv = parser_recv.clone();
let reorder = reorder.clone();
let done = done.clone();
let canonical = canonical.clone();
thread::spawn(move || -> Result<()> {
for (metadata, bytes, xor_i) in parser_recv {
if done.load(Ordering::Relaxed) {
continue; // drain quietly
}
let (offset, block) = match parse_canonical_block(
bytes, metadata, xor_i, xor_bytes, &canonical,
) {
Ok(Some(item)) => item,
Ok(None) => continue, // orphan / out of range
Err(e) => {
warn!("parse_canonical_block failed: {e}");
continue;
}
};
if !reorder.lock().try_emit(offset, block) {
done.store(true, Ordering::Relaxed);
}
}
Ok(())
})
})
.collect()
}
/// Walk blk files from `first_blk_index`, scan each one, and ship every
/// raw block found to the parser pool. Stops early if `done` flips or
/// the parser channel closes.
fn read_and_dispatch(
paths: &BlkIndexToBlkPath,
first_blk_index: u16,
xor_bytes: XORBytes,
parser_send: &Sender<ScannedItem>,
done: &AtomicBool,
) -> Result<()> {
for (&blk_index, blk_path) in paths.range(first_blk_index..) {
if done.load(Ordering::Relaxed) {
return Ok(());
}
let mut bytes = fs::read(blk_path).map_err(|e| {
error!("Failed to read blk file {}: {e}", blk_path.display());
Error::Internal("Failed to read blk file")
})?;
let result = scan_bytes(
&mut bytes,
blk_index,
0,
xor_bytes,
|metadata, block_bytes, xor_i| {
if done.load(Ordering::Relaxed)
|| parser_send.send((metadata, block_bytes, xor_i)).is_err()
{
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
},
);
if result.interrupted {
return Ok(());
}
}
Ok(())
}
// ─────────────────────────────────────────────────────────────────────────────
// Tail pipeline — reverse 5MB chunks of the last blk files until we've
// collected every canonical hash, then emit forward.
// ─────────────────────────────────────────────────────────────────────────────
fn pipeline_tail(
paths: &BlkIndexToBlkPath,
xor_bytes: XORBytes,
canonical: &Arc<CanonicalRange>,
send: &Sender<ReadBlock>,
) -> Result<()> {
// Collected matches, keyed by canonical offset. Tail ranges hold at most
// `TAIL_THRESHOLD` blocks, so a Vec<Option<_>> is the simplest
// representation.
let mut collected: Vec<Option<ReadBlock>> = (0..canonical.len()).map(|_| None).collect();
let mut remaining = canonical.len();
'files: for (&blk_index, path) in paths.iter().rev() {
let file_len = fs::metadata(path).map(|m| m.len() as usize).unwrap_or(0);
if file_len == 0 {
continue;
}
let Ok(mut file) = File::open(path) else {
return Err(Error::Internal("Failed to open blk file"));
};
let mut read_end = file_len;
let mut head: Vec<u8> = Vec::new();
while read_end > 0 && remaining > 0 {
let read_start = read_end.saturating_sub(TAIL_CHUNK);
let chunk_len = read_end - read_start;
read_end = read_start;
if file.seek(SeekFrom::Start(read_start as u64)).is_err() {
return Err(Error::Internal("Failed to seek blk file"));
}
let mut buf = vec![0u8; chunk_len + head.len()];
if file.read_exact(&mut buf[..chunk_len]).is_err() {
return Err(Error::Internal("Failed to read blk chunk"));
}
buf[chunk_len..].copy_from_slice(&head);
head.clear();
let result: ScanResult = scan_bytes(
&mut buf,
blk_index,
read_start,
xor_bytes,
|metadata, block_bytes, xor_i| {
match parse_canonical_block(block_bytes, metadata, xor_i, xor_bytes, canonical)
{
Ok(Some((offset, block))) => {
let slot = &mut collected[offset as usize];
if slot.is_none() {
*slot = Some(block);
remaining -= 1;
}
}
Ok(None) => {} // orphan / out of range
Err(e) => warn!("parse_canonical_block failed in tail pipeline: {e}"),
}
if remaining == 0 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
},
);
if remaining == 0 {
break 'files;
}
if read_start > 0 {
head = buf[..result.first_magic.unwrap_or(buf.len())].to_vec();
}
}
}
if remaining > 0 {
return Err(Error::Internal(
"after_canonical tail pipeline: blk files missing canonical blocks",
));
}
// `remaining == 0` above guarantees every slot is `Some`; `flatten`
// is just the natural way to write the emit loop.
for block in collected.into_iter().flatten() {
if send.send(block).is_err() {
return Ok(());
}
}
Ok(())
}
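// Hedged sketch (not in the original): the reverse-chunk walk above,
// reduced to byte windows. `reverse_windows(12, 5)` yields
// `[(7, 12), (2, 7), (0, 2)]`: the file is read back to front, and the
// `head` carry above re-prepends the bytes before each chunk's first
// magic so records straddling a window edge are scanned exactly once.
fn reverse_windows(len: usize, chunk: usize) -> Vec<(usize, usize)> {
    let mut out = Vec::new();
    let mut end = len;
    while end > 0 {
        let start = end.saturating_sub(chunk);
        out.push((start, end));
        end = start;
    }
    out
}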

View File

@@ -24,11 +24,13 @@ use rayon::prelude::*;
use tracing::{error, warn};
mod blk_index_to_blk_path;
mod canonical;
mod decode;
mod scan;
mod xor_bytes;
mod xor_index;
pub use canonical::CanonicalRange;
use decode::*;
use scan::*;
pub use xor_bytes::*;

View File

@@ -1,13 +1,19 @@
use std::{thread::sleep, time::Duration};
use bitcoincore_rpc::{Client as CoreClient, Error as RpcError, RpcApi, jsonrpc};
use brk_error::Result;
use brk_error::{Error, Result};
use brk_types::Sats;
use parking_lot::RwLock;
use serde_json::value::RawValue;
use tracing::info;
use super::{Auth, BlockHeaderInfo, BlockInfo, BlockchainInfo, RawMempoolEntry, TxOutInfo};
/// Per-batch request count for `get_block_hashes_range`. Sized so the
/// JSON request body stays well under a megabyte and bitcoind doesn't
/// spend too long on a single batch before yielding results.
const BATCH_CHUNK: usize = 2000;
fn to_rpc_auth(auth: &Auth) -> bitcoincore_rpc::Auth {
match auth {
Auth::None => bitcoincore_rpc::Auth::None,
@@ -171,6 +177,66 @@ impl ClientInner {
Ok(self.call_with_retry(|c| c.get_block_hash(height))?)
}
/// Batched canonical height → block hash lookup over the inclusive
/// range `start..=end`. See the corepc backend for the rationale and
/// chunking strategy; this mirror uses bitcoincore-rpc's
/// `get_jsonrpc_client` accessor.
pub fn get_block_hashes_range(
&self,
start: u64,
end: u64,
) -> Result<Vec<bitcoin::BlockHash>> {
if end < start {
return Ok(Vec::new());
}
let total = (end - start + 1) as usize;
let mut hashes = Vec::with_capacity(total);
let mut chunk_start = start;
while chunk_start <= end {
let chunk_end = (chunk_start + BATCH_CHUNK as u64 - 1).min(end);
self.batch_get_block_hashes(chunk_start, chunk_end, &mut hashes)?;
chunk_start = chunk_end + 1;
}
Ok(hashes)
}
fn batch_get_block_hashes(
&self,
start: u64,
end: u64,
out: &mut Vec<bitcoin::BlockHash>,
) -> Result<()> {
let params: Vec<Box<RawValue>> = (start..=end)
.map(|h| {
RawValue::from_string(format!("[{h}]")).map_err(|e| Error::Parse(e.to_string()))
})
.collect::<Result<Vec<_>>>()?;
let client = self.client.read();
let jsonrpc_client = client.get_jsonrpc_client();
let requests: Vec<jsonrpc::Request> = params
.iter()
.map(|p| jsonrpc_client.build_request("getblockhash", Some(p)))
.collect();
let responses = jsonrpc_client
.send_batch(&requests)
.map_err(|e| Error::Parse(format!("getblockhash batch failed: {e}")))?;
for response in responses {
let response = response.ok_or(Error::Internal("Missing response in JSON-RPC batch"))?;
let hex: String = response
.result()
.map_err(|e| Error::Parse(format!("getblockhash batch result: {e}")))?;
out.push(
hex.parse::<bitcoin::BlockHash>()
.map_err(|e| Error::Parse(format!("invalid block hash hex: {e}")))?,
);
}
Ok(())
}
pub fn get_block_header(&self, hash: &bitcoin::BlockHash) -> Result<bitcoin::block::Header> {
Ok(self.call_with_retry(|c| c.get_block_header(hash))?)
}

View File

@@ -1,9 +1,10 @@
use std::{thread::sleep, time::Duration};
use brk_error::Result;
use brk_error::{Error, Result};
use brk_types::Sats;
use corepc_client::client_sync::Auth as CorepcAuth;
use parking_lot::RwLock;
use serde_json::value::RawValue;
use tracing::info;
use super::{Auth, BlockHeaderInfo, BlockInfo, BlockchainInfo, RawMempoolEntry, TxOutInfo};
@@ -11,6 +12,11 @@ use super::{Auth, BlockHeaderInfo, BlockInfo, BlockchainInfo, RawMempoolEntry, T
type CoreClient = corepc_client::client_sync::v30::Client;
type CoreError = corepc_client::client_sync::Error;
/// Per-batch request count for `get_block_hashes_range`. Sized so the
/// JSON request body stays well under a megabyte and bitcoind doesn't
/// spend too long on a single batch before yielding results.
const BATCH_CHUNK: usize = 2000;
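// Hedged sketch (not in the original): the chunk arithmetic used by
// `get_block_hashes_range` below, as a standalone function. Splitting
// 0..=4999 with a chunk size of 2000 yields [0..=1999, 2000..=3999,
// 4000..=4999]: three HTTP round-trips instead of 5000 sequential calls.
fn chunk_bounds(start: u64, end: u64, chunk: u64) -> Vec<(u64, u64)> {
    let mut out = Vec::new();
    let mut s = start;
    while s <= end {
        let e = (s + chunk - 1).min(end);
        out.push((s, e));
        s = e + 1;
    }
    out
}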
#[derive(Debug)]
pub struct ClientInner {
url: String,
@@ -174,6 +180,73 @@ impl ClientInner {
Ok(r.block_hash()?)
}
/// Batched canonical height → block hash lookup over the inclusive
/// range `start..=end`. Internally splits into JSON-RPC batches of
/// `BATCH_CHUNK` requests so a 1M-block reindex doesn't try to push
/// a 50 MB request body or hold every response in memory at once.
/// Each chunk is one HTTP round-trip — still drops the per-call
/// overhead that dominates a sequential `get_block_hash` loop.
///
/// Returns hashes in canonical order (`start`, `start+1`, …, `end`).
pub fn get_block_hashes_range(
&self,
start: u64,
end: u64,
) -> Result<Vec<bitcoin::BlockHash>> {
if end < start {
return Ok(Vec::new());
}
let total = (end - start + 1) as usize;
let mut hashes = Vec::with_capacity(total);
let mut chunk_start = start;
while chunk_start <= end {
let chunk_end = (chunk_start + BATCH_CHUNK as u64 - 1).min(end);
self.batch_get_block_hashes(chunk_start, chunk_end, &mut hashes)?;
chunk_start = chunk_end + 1;
}
Ok(hashes)
}
fn batch_get_block_hashes(
&self,
start: u64,
end: u64,
out: &mut Vec<bitcoin::BlockHash>,
) -> Result<()> {
// Build raw param strings up front so each `Request` can borrow
// them; `corepc_jsonrpc::Client::build_request` takes a borrowed
// `&RawValue`.
let params: Vec<Box<RawValue>> = (start..=end)
.map(|h| {
RawValue::from_string(format!("[{h}]")).map_err(|e| Error::Parse(e.to_string()))
})
.collect::<Result<Vec<_>>>()?;
let client = self.client.read();
let requests: Vec<corepc_jsonrpc::Request> = params
.iter()
.map(|p| client.jsonrpc().build_request("getblockhash", Some(p)))
.collect();
let responses = client
.jsonrpc()
.send_batch(&requests)
.map_err(|e| Error::Parse(format!("getblockhash batch failed: {e}")))?;
for response in responses {
let response = response.ok_or(Error::Internal("Missing response in JSON-RPC batch"))?;
let hex: String = response
.result()
.map_err(|e| Error::Parse(format!("getblockhash batch result: {e}")))?;
out.push(
hex.parse::<bitcoin::BlockHash>()
.map_err(|e| Error::Parse(format!("invalid block hash hex: {e}")))?,
);
}
Ok(())
}
pub fn get_block_header(&self, hash: &bitcoin::BlockHash) -> Result<bitcoin::block::Header> {
let r = self.call_with_retry(|c| c.get_block_header(hash))?;
r.block_header()

View File

@@ -78,6 +78,22 @@ impl Client {
self.0.get_block_hash(height.into()).map(BlockHash::from)
}
/// Get every canonical block hash for the inclusive height range
/// `start..=end` via batched JSON-RPC requests (one round-trip per
/// `BATCH_CHUNK` heights). Returns hashes in canonical order
/// (`start`, `start+1`, …, `end`). Use this whenever resolving more
/// than ~2 heights: one round-trip per batch beats N sequential
/// `get_block_hash` calls once the per-call overhead dominates.
pub fn get_block_hashes_range<H1, H2>(&self, start: H1, end: H2) -> Result<Vec<BlockHash>>
where
H1: Into<u64>,
H2: Into<u64>,
{
self.0
.get_block_hashes_range(start.into(), end.into())
.map(|v| v.into_iter().map(BlockHash::from).collect())
}
pub fn get_block_header<'a, H>(&self, hash: &'a H) -> Result<bitcoin::block::Header>
where
&'a H: Into<&'a bitcoin::BlockHash>,

View File

@@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize, Serializer, de};
use vecdb::{Bytes, Formattable};
/// Block hash
#[derive(Default, Debug, Deref, Clone, PartialEq, Eq, Bytes, JsonSchema)]
#[derive(Default, Debug, Deref, Clone, PartialEq, Eq, Hash, Bytes, JsonSchema)]
#[repr(C)]
#[schemars(
transparent,

View File

@@ -3,7 +3,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use vecdb::{Bytes, Formattable};
use crate::{Cents, CentsSats, CentsSquaredSats, EmptyAddrData, Sats, SupplyState};
use crate::{Cents, CentsSats, CentsSquaredSats, EmptyAddrData, OutputType, Sats, SupplyState};
/// Snapshot of cost basis related state.
/// Uses CentsSats (u64) for single-UTXO values, CentsSquaredSats (u128) for investor cap.
@@ -104,6 +104,47 @@ impl FundedAddrData {
self.funded_txo_count == self.spent_txo_count
}
/// Whether this address currently holds at least one UTXO.
#[inline]
pub fn is_funded(&self) -> bool {
!self.has_0_utxos()
}
/// Whether this address has received more than one output over its
/// lifetime — the simplest proxy for address reuse (close to but not
/// exactly "received in 2+ distinct transactions"; over-counts the rare
/// case of multi-output funding to the same address in one tx).
#[inline]
pub fn is_reused(&self) -> bool {
self.funded_txo_count > 1
}
/// Whether this address's public key has been revealed in the chain.
/// For P2PK33/P2PK65/P2TR the pubkey is in the locking script of any
/// funding output; for other types it's only revealed when spending.
#[inline]
pub fn is_pubkey_exposed(&self, output_type: OutputType) -> bool {
output_type.pubkey_exposed_at_funding() || self.spent_txo_count > 0
}
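// Worked examples of the rule above (illustrative, not in the original):
//   P2TR,  spent_txo_count == 0  -> exposed (output key sits in the locking script)
//   P2PKH, spent_txo_count == 0  -> not exposed (only the pubkey hash is on-chain)
//   P2PKH, spent_txo_count  > 0  -> exposed (pubkey revealed by the spend)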
/// Whether this address currently holds funds AND its pubkey is exposed.
/// True iff the address contributes to the "funds at quantum risk" set.
#[inline]
pub fn is_funded_with_exposed_pubkey(&self, output_type: OutputType) -> bool {
self.is_funded() && self.is_pubkey_exposed(output_type)
}
/// This address's contribution (in sats) to the "funds at quantum risk"
/// supply: its balance if currently in the funded-exposed set, else 0.
#[inline]
pub fn exposed_supply_contribution(&self, output_type: OutputType) -> u64 {
if self.is_funded_with_exposed_pubkey(output_type) {
u64::from(self.balance())
} else {
0
}
}
pub fn receive(&mut self, amount: Sats, price: Cents) {
self.receive_outputs(amount, price, 1);
}

View File

@@ -112,6 +112,15 @@ impl OutputType {
!self.is_spendable()
}
/// Whether the address type's public key is revealed at funding time
/// (vs. only at spending time). For P2PK33/P2PK65 the pubkey is directly
/// in the locking script; for P2TR the tweaked output key is in the
/// locking script. All other address types hash the pubkey/script and
/// only reveal it on spending.
pub fn pubkey_exposed_at_funding(&self) -> bool {
matches!(self, Self::P2PK65 | Self::P2PK33 | Self::P2TR)
}
pub fn as_vec() -> Vec<Self> {
vec![
Self::P2PK65,

View File

@@ -1,3 +1,5 @@
use crate::VSize;
/// Standard percentile values used throughout BRK.
pub const PERCENTILES: [u8; 19] = [
5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95,
@@ -16,3 +18,28 @@ pub fn get_percentile<T: Clone>(sorted: &[T], percentile: f64) -> T {
let index = ((len - 1) as f64 * percentile).round() as usize;
sorted[index].clone()
}
/// Get a percentile value from a sorted (value, vsize) slice using
/// vsize-weighted interpolation — matches mempool.space's feeRange calculation.
///
/// Walks through the sorted pairs accumulating vsize. Returns the value of
/// the first pair whose cumulative vsize reaches the rounded target
/// `total_vsize * percentile`.
///
/// # Panics
/// Panics if the slice is empty.
pub fn get_weighted_percentile<T: Clone>(sorted_with_vsizes: &[(T, VSize)], percentile: f64) -> T {
assert!(
!sorted_with_vsizes.is_empty(),
"Cannot get percentile from empty slice"
);
let total: u64 = sorted_with_vsizes.iter().map(|(_, v)| u64::from(*v)).sum();
let target = (total as f64 * percentile).round() as u64;
let mut cumulative = 0u64;
for (value, vsize) in sorted_with_vsizes {
cumulative += u64::from(*vsize);
if cumulative >= target {
return value.clone();
}
}
sorted_with_vsizes.last().unwrap().0.clone()
}
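// Hedged worked example (not in the original): the same walk over plain
// u64 vsizes so it runs without constructing `VSize`. With values 1, 2, 3
// and vsizes 100, 400, 500 (total 1000), percentile 0.5 targets cumulative
// vsize 500, first reached at the second pair, so the result is 2.
#[cfg(test)]
#[test]
fn weighted_percentile_walkthrough() {
    fn weighted(sorted: &[(u64, u64)], percentile: f64) -> u64 {
        let total: u64 = sorted.iter().map(|&(_, v)| v).sum();
        let target = (total as f64 * percentile).round() as u64;
        let mut cumulative = 0u64;
        for &(value, vsize) in sorted {
            cumulative += vsize;
            if cumulative >= target {
                return value;
            }
        }
        sorted.last().unwrap().0
    }
    assert_eq!(weighted(&[(1, 100), (2, 400), (3, 500)], 0.5), 2);
}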

View File

@@ -93,11 +93,7 @@ impl Serialize for TxOut {
S: Serializer,
{
let output_type = self.type_();
// P2PK has no standard address format — don't include scriptpubkey_address
let addr = match output_type {
OutputType::P2PK65 | OutputType::P2PK33 => None,
_ => self.addr(),
};
let addr = self.addr();
let field_count = if addr.is_some() { 5 } else { 4 };
let mut state = serializer.serialize_struct("TxOut", field_count)?;
state.serialize_field("scriptpubkey", &self.script_pubkey.to_hex_string())?;

View File

@@ -2232,6 +2232,30 @@ function createGrossInvestedInvestorLossNetNuplProfitSentimentPattern2(client, a
* @property {SeriesPattern1<Dollars>} usd
*/
/**
* @typedef {Object} P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2a
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk33
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk65
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2sh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2tr
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wpkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wsh
*/
/**
* @typedef {Object} P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3
* @property {_1m1w1y24hCumulativePattern} p2a
* @property {_1m1w1y24hCumulativePattern} p2pk33
* @property {_1m1w1y24hCumulativePattern} p2pk65
* @property {_1m1w1y24hCumulativePattern} p2pkh
* @property {_1m1w1y24hCumulativePattern} p2sh
* @property {_1m1w1y24hCumulativePattern} p2tr
* @property {_1m1w1y24hCumulativePattern} p2wpkh
* @property {_1m1w1y24hCumulativePattern} p2wsh
*/
/**
* @typedef {Object} Pct0Pct1Pct2Pct5Pct95Pct98Pct99Pattern
* @property {BpsPriceRatioPattern} pct05
@@ -2655,6 +2679,31 @@ function createDeltaHalfInToTotalPattern2(client, acc) {
};
}
/**
* @typedef {Object} _1m1w1y24hCumulativePattern
* @property {BpsPercentRatioPattern3} _1m
* @property {BpsPercentRatioPattern3} _1w
* @property {BpsPercentRatioPattern3} _1y
* @property {BpsPercentRatioPattern3} _24h
* @property {BpsPercentRatioPattern3} cumulative
*/
/**
* Create a _1m1w1y24hCumulativePattern pattern node
* @param {BrkClientBase} client
* @param {string} acc - Accumulated series name
* @returns {_1m1w1y24hCumulativePattern}
*/
function create_1m1w1y24hCumulativePattern(client, acc) {
return {
_1m: createBpsPercentRatioPattern3(client, _m(acc, 'sum_1m')),
_1w: createBpsPercentRatioPattern3(client, _m(acc, 'sum_1w')),
_1y: createBpsPercentRatioPattern3(client, _m(acc, 'sum_1y')),
_24h: createBpsPercentRatioPattern3(client, _m(acc, 'sum_24h')),
cumulative: createBpsPercentRatioPattern3(client, _m(acc, 'cumulative')),
};
}
/**
* @typedef {Object} _1m1w1y24hBlockPattern
* @property {SeriesPattern1<StoredF32>} _1m
@@ -4101,6 +4150,12 @@ function createBpsRatioPattern(client, acc) {
};
}
/**
* @typedef {Object} ByPercentPattern
* @property {P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2} byType
* @property {P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3} percent
*/
/**
* @typedef {Object} CentsUsdPattern3
* @property {SeriesPattern1<Cents>} cents
@@ -4541,6 +4596,8 @@ function createTransferPattern(client, acc) {
* @property {SeriesTree_Transactions_Fees} fees
* @property {SeriesTree_Transactions_Versions} versions
* @property {SeriesTree_Transactions_Volume} volume
* @property {SeriesTree_Transactions_InputTypes} inputTypes
* @property {SeriesTree_Transactions_OutputTypes} outputTypes
*/
/**
@@ -4579,7 +4636,7 @@ function createTransferPattern(client, acc) {
* @property {SeriesPattern19<Sats>} inputValue
* @property {SeriesPattern19<Sats>} outputValue
* @property {_6bBlockTxPattern<Sats>} fee
* @property {_6bBlockTxPattern<FeeRate>} feeRate
* @property {SeriesPattern19<FeeRate>} feeRate
* @property {_6bBlockTxPattern<FeeRate>} effectiveFeeRate
*/
@@ -4598,6 +4655,66 @@ function createTransferPattern(client, acc) {
* @property {_1m1w1y24hPattern<StoredF32>} inputsPerSec
*/
/**
* @typedef {Object} SeriesTree_Transactions_InputTypes
* @property {SeriesTree_Transactions_InputTypes_ByType} byType
* @property {SeriesTree_Transactions_InputTypes_Percent} percent
*/
/**
* @typedef {Object} SeriesTree_Transactions_InputTypes_ByType
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk65
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk33
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2sh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wpkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wsh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2tr
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2a
*/
/**
* @typedef {Object} SeriesTree_Transactions_InputTypes_Percent
* @property {_1m1w1y24hCumulativePattern} p2pk65
* @property {_1m1w1y24hCumulativePattern} p2pk33
* @property {_1m1w1y24hCumulativePattern} p2pkh
* @property {_1m1w1y24hCumulativePattern} p2sh
* @property {_1m1w1y24hCumulativePattern} p2wpkh
* @property {_1m1w1y24hCumulativePattern} p2wsh
* @property {_1m1w1y24hCumulativePattern} p2tr
* @property {_1m1w1y24hCumulativePattern} p2a
*/
/**
* @typedef {Object} SeriesTree_Transactions_OutputTypes
* @property {SeriesTree_Transactions_OutputTypes_ByType} byType
* @property {SeriesTree_Transactions_OutputTypes_Percent} percent
*/
/**
* @typedef {Object} SeriesTree_Transactions_OutputTypes_ByType
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk65
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pk33
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2pkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2sh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wpkh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2wsh
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2tr
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2a
*/
/**
* @typedef {Object} SeriesTree_Transactions_OutputTypes_Percent
* @property {_1m1w1y24hCumulativePattern} p2pk65
* @property {_1m1w1y24hCumulativePattern} p2pk33
* @property {_1m1w1y24hCumulativePattern} p2pkh
* @property {_1m1w1y24hCumulativePattern} p2sh
* @property {_1m1w1y24hCumulativePattern} p2wpkh
* @property {_1m1w1y24hCumulativePattern} p2wsh
* @property {_1m1w1y24hCumulativePattern} p2tr
* @property {_1m1w1y24hCumulativePattern} p2a
*/
/**
* @typedef {Object} SeriesTree_Inputs
* @property {SeriesTree_Inputs_Raw} raw
@@ -4657,6 +4774,7 @@ function createTransferPattern(client, acc) {
* @property {SeriesTree_Addrs_Activity} activity
* @property {AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3} total
* @property {SeriesTree_Addrs_New} new
* @property {SeriesTree_Addrs_Reused} reused
* @property {SeriesTree_Addrs_Delta} delta
*/
@@ -4766,6 +4884,12 @@ function createTransferPattern(client, acc) {
* @property {AverageBlockCumulativeSumPattern<StoredU64>} p2a
*/
/**
* @typedef {Object} SeriesTree_Addrs_Reused
* @property {AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3} funded
* @property {AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3} total
*/
/**
* @typedef {Object} SeriesTree_Addrs_Delta
* @property {AbsoluteRatePattern} all
@@ -7933,7 +8057,7 @@ class BrkClient extends BrkClientBase {
inputValue: createSeriesPattern19(this, 'input_value'),
outputValue: createSeriesPattern19(this, 'output_value'),
fee: create_6bBlockTxPattern(this, 'fee'),
feeRate: create_6bBlockTxPattern(this, 'fee_rate'),
feeRate: createSeriesPattern19(this, 'fee_rate'),
effectiveFeeRate: create_6bBlockTxPattern(this, 'effective_fee_rate'),
},
versions: {
@@ -7947,6 +8071,50 @@ class BrkClient extends BrkClientBase {
outputsPerSec: create_1m1w1y24hPattern(this, 'outputs_per_sec'),
inputsPerSec: create_1m1w1y24hPattern(this, 'inputs_per_sec'),
},
inputTypes: {
byType: {
p2pk65: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pk65_in'),
p2pk33: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pk33_in'),
p2pkh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pkh_in'),
p2sh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2sh_in'),
p2wpkh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2wpkh_in'),
p2wsh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2wsh_in'),
p2tr: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2tr_in'),
p2a: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2a_in'),
},
percent: {
p2pk65: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pk65_in_rel_to_all'),
p2pk33: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pk33_in_rel_to_all'),
p2pkh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pkh_in_rel_to_all'),
p2sh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2sh_in_rel_to_all'),
p2wpkh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2wpkh_in_rel_to_all'),
p2wsh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2wsh_in_rel_to_all'),
p2tr: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2tr_in_rel_to_all'),
p2a: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2a_in_rel_to_all'),
},
},
outputTypes: {
byType: {
p2pk65: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pk65_out'),
p2pk33: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pk33_out'),
p2pkh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2pkh_out'),
p2sh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2sh_out'),
p2wpkh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2wpkh_out'),
p2wsh: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2wsh_out'),
p2tr: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2tr_out'),
p2a: createAverageBlockCumulativeSumPattern(this, 'tx_count_with_p2a_out'),
},
percent: {
p2pk65: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pk65_out_rel_to_all'),
p2pk33: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pk33_out_rel_to_all'),
p2pkh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2pkh_out_rel_to_all'),
p2sh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2sh_out_rel_to_all'),
p2wpkh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2wpkh_out_rel_to_all'),
p2wsh: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2wsh_out_rel_to_all'),
p2tr: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2tr_out_rel_to_all'),
p2a: create_1m1w1y24hCumulativePattern(this, 'tx_count_with_p2a_out_rel_to_all'),
},
},
},
inputs: {
raw: {
@@ -8054,6 +8222,10 @@ class BrkClient extends BrkClientBase {
p2tr: createAverageBlockCumulativeSumPattern(this, 'p2tr_new_addr_count'),
p2a: createAverageBlockCumulativeSumPattern(this, 'p2a_new_addr_count'),
},
reused: {
funded: createAllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(this, 'reused_addr_count'),
total: createAllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(this, 'total_reused_addr_count'),
},
delta: {
all: createAbsoluteRatePattern(this, 'addr_count'),
p2pk65: createAbsoluteRatePattern(this, 'p2pk65_addr_count'),

View File

@@ -2713,6 +2713,14 @@ class BpsCentsPercentilesRatioSatsSmaStdUsdPattern:
"""Pattern struct for repeated tree structure."""
pass
class P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern2:
"""Pattern struct for repeated tree structure."""
pass
class P2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3:
"""Pattern struct for repeated tree structure."""
pass
class Pct0Pct1Pct2Pct5Pct95Pct98Pct99Pattern:
"""Pattern struct for repeated tree structure."""
@@ -2901,6 +2909,17 @@ class DeltaHalfInToTotalPattern2:
self.to_circulating: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'to_circulating'))
self.total: BtcCentsSatsUsdPattern3 = BtcCentsSatsUsdPattern3(client, acc)
class _1m1w1y24hCumulativePattern:
"""Pattern struct for repeated tree structure."""
def __init__(self, client: BrkClientBase, acc: str):
"""Create pattern node with accumulated series name."""
self._1m: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1m'))
self._1w: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1w'))
self._1y: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_1y'))
self._24h: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'sum_24h'))
self.cumulative: BpsPercentRatioPattern3 = BpsPercentRatioPattern3(client, _m(acc, 'cumulative'))
class _1m1w1y24hBlockPattern:
"""Pattern struct for repeated tree structure."""
@@ -3523,6 +3542,10 @@ class BpsRatioPattern:
self.bps: SeriesPattern1[BasisPointsSigned32] = SeriesPattern1(client, _m(acc, 'bps'))
self.ratio: SeriesPattern1[StoredF32] = SeriesPattern1(client, acc)
class ByPercentPattern:
"""Pattern struct for repeated tree structure."""
pass
class CentsUsdPattern3:
"""Pattern struct for repeated tree structure."""
@@ -3821,7 +3844,7 @@ class SeriesTree_Transactions_Fees:
self.input_value: SeriesPattern19[Sats] = SeriesPattern19(client, 'input_value')
self.output_value: SeriesPattern19[Sats] = SeriesPattern19(client, 'output_value')
self.fee: _6bBlockTxPattern[Sats] = _6bBlockTxPattern(client, 'fee')
self.fee_rate: _6bBlockTxPattern[FeeRate] = _6bBlockTxPattern(client, 'fee_rate')
self.fee_rate: SeriesPattern19[FeeRate] = SeriesPattern19(client, 'fee_rate')
self.effective_fee_rate: _6bBlockTxPattern[FeeRate] = _6bBlockTxPattern(client, 'effective_fee_rate')
class SeriesTree_Transactions_Versions:
@@ -3841,6 +3864,72 @@ class SeriesTree_Transactions_Volume:
self.outputs_per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'outputs_per_sec')
self.inputs_per_sec: _1m1w1y24hPattern[StoredF32] = _1m1w1y24hPattern(client, 'inputs_per_sec')
class SeriesTree_Transactions_InputTypes_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk65_in')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk33_in')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pkh_in')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2sh_in')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wpkh_in')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wsh_in')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2tr_in')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2a_in')
class SeriesTree_Transactions_InputTypes_Percent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk65_in_rel_to_all')
self.p2pk33: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk33_in_rel_to_all')
self.p2pkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pkh_in_rel_to_all')
self.p2sh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2sh_in_rel_to_all')
self.p2wpkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wpkh_in_rel_to_all')
self.p2wsh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wsh_in_rel_to_all')
self.p2tr: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2tr_in_rel_to_all')
self.p2a: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2a_in_rel_to_all')
class SeriesTree_Transactions_InputTypes:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.by_type: SeriesTree_Transactions_InputTypes_ByType = SeriesTree_Transactions_InputTypes_ByType(client)
self.percent: SeriesTree_Transactions_InputTypes_Percent = SeriesTree_Transactions_InputTypes_Percent(client)
class SeriesTree_Transactions_OutputTypes_ByType:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk65_out')
self.p2pk33: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pk33_out')
self.p2pkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2pkh_out')
self.p2sh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2sh_out')
self.p2wpkh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wpkh_out')
self.p2wsh: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2wsh_out')
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2tr_out')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'tx_count_with_p2a_out')
class SeriesTree_Transactions_OutputTypes_Percent:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.p2pk65: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk65_out_rel_to_all')
self.p2pk33: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pk33_out_rel_to_all')
self.p2pkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2pkh_out_rel_to_all')
self.p2sh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2sh_out_rel_to_all')
self.p2wpkh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wpkh_out_rel_to_all')
self.p2wsh: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2wsh_out_rel_to_all')
self.p2tr: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2tr_out_rel_to_all')
self.p2a: _1m1w1y24hCumulativePattern = _1m1w1y24hCumulativePattern(client, 'tx_count_with_p2a_out_rel_to_all')
class SeriesTree_Transactions_OutputTypes:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.by_type: SeriesTree_Transactions_OutputTypes_ByType = SeriesTree_Transactions_OutputTypes_ByType(client)
self.percent: SeriesTree_Transactions_OutputTypes_Percent = SeriesTree_Transactions_OutputTypes_Percent(client)
class SeriesTree_Transactions:
"""Series tree node."""
@@ -3851,6 +3940,8 @@ class SeriesTree_Transactions:
self.fees: SeriesTree_Transactions_Fees = SeriesTree_Transactions_Fees(client)
self.versions: SeriesTree_Transactions_Versions = SeriesTree_Transactions_Versions(client)
self.volume: SeriesTree_Transactions_Volume = SeriesTree_Transactions_Volume(client)
self.input_types: SeriesTree_Transactions_InputTypes = SeriesTree_Transactions_InputTypes(client)
self.output_types: SeriesTree_Transactions_OutputTypes = SeriesTree_Transactions_OutputTypes(client)
class SeriesTree_Inputs_Raw:
"""Series tree node."""
@@ -4027,6 +4118,13 @@ class SeriesTree_Addrs_New:
self.p2tr: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2tr_new_addr_count')
self.p2a: AverageBlockCumulativeSumPattern[StoredU64] = AverageBlockCumulativeSumPattern(client, 'p2a_new_addr_count')
class SeriesTree_Addrs_Reused:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
self.funded: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'reused_addr_count')
self.total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'total_reused_addr_count')
class SeriesTree_Addrs_Delta:
"""Series tree node."""
@@ -4053,6 +4151,7 @@ class SeriesTree_Addrs:
self.activity: SeriesTree_Addrs_Activity = SeriesTree_Addrs_Activity(client)
self.total: AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3 = AllP2aP2pk33P2pk65P2pkhP2shP2trP2wpkhP2wshPattern3(client, 'total_addr_count')
self.new: SeriesTree_Addrs_New = SeriesTree_Addrs_New(client)
self.reused: SeriesTree_Addrs_Reused = SeriesTree_Addrs_Reused(client)
self.delta: SeriesTree_Addrs_Delta = SeriesTree_Addrs_Delta(client)
class SeriesTree_Scripts_Raw_Empty:

View File

@@ -5,3 +5,7 @@ To check types run:
```sh
npx --package typescript tsc --noEmit --pretty false | grep -v "modules/"
```
# Code
Codex will review your output once you are done.

View File

@@ -102,6 +102,9 @@
* BPS + percent + ratio pattern
* @typedef {Brk.BpsPercentRatioPattern3} PercentRatioPattern
*
* Percent + ratio per window + cumulative (mirrors CountPattern but for percent)
* @typedef {Brk._1m1w1y24hCumulativePattern} PercentRatioCumulativePattern
*
* BPS + ratio pattern (for NUPL and similar)
* @typedef {Brk.BpsRatioPattern} NuplPattern
*

View File

@@ -171,6 +171,7 @@ export async function goToCube(hashOrHeight, { silent } = {}) {
selectCube(cube, { scroll: "smooth", silent });
return;
}
for (const cube of blocksEl.children) cube.classList.add("skeleton");
let startHash;
try {
const height = await resolveHeight(hashOrHeight);

View File

@@ -11,6 +11,7 @@ import {
chartsFromFullPerBlock,
chartsFromCount,
chartsFromCountEntries,
chartsFromPercentCumulative,
chartsFromAggregatedPerBlock,
averagesArray,
simpleDeltaTree,
@@ -109,6 +110,18 @@ export function createNetworkSection() {
/** @param {AddressableType} t */
getSeries: (t) => addrs.total[t],
},
{
name: "Funded Reused",
title: "Funded Reused Address Count by Type",
/** @param {AddressableType} t */
getSeries: (t) => addrs.reused.funded[t],
},
{
name: "Total Reused",
title: "Total Reused Address Count by Type",
/** @param {AddressableType} t */
getSeries: (t) => addrs.reused.total[t],
},
]);
const countMetrics = /** @type {const} */ ([
@@ -152,6 +165,51 @@ export function createNetworkSection() {
})),
],
},
{
name: "Reused",
tree: [
{
name: "Compare",
title: title("Reused Address Count"),
bottom: [
line({
series: addrs.reused.funded[key],
name: "Funded",
unit: Unit.count,
}),
line({
series: addrs.reused.total[key],
name: "Total",
color: colors.gray,
unit: Unit.count,
}),
],
},
{
name: "Funded",
title: title("Funded Reused Addresses"),
bottom: [
line({
series: addrs.reused.funded[key],
name: "Funded Reused",
unit: Unit.count,
}),
],
},
{
name: "Total",
title: title("Total Reused Addresses"),
bottom: [
line({
series: addrs.reused.total[key],
name: "Total Reused",
color: colors.gray,
unit: Unit.count,
}),
],
},
],
},
...simpleDeltaTree({
delta: addrs.delta[key],
title,
@@ -213,6 +271,103 @@ export function createNetworkSection() {
},
];
/**
* @param {string} direction
* @param {{ byType: Record<AddressableType, CountPattern<number>>, percent: Record<AddressableType, PercentRatioCumulativePattern> }} source
*/
const createTxTypeGroup = (direction, source) => {
const lowerDir = direction.toLowerCase();
return {
name: `By ${direction} Type`,
tree: [
{
name: "Count Compare",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.byType[t.key].sum[w.key],
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.byType[t.key].cumulative,
name: t.name,
color: t.color,
unit: Unit.count,
defaultActive: t.defaultActive,
}),
),
},
],
},
{
name: "% Compare",
tree: [
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: `${w.title} Share of Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.percent[t.key][w.key].percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
})),
{
name: "Cumulative",
title: `Cumulative Share of Transactions by ${direction} Type`,
bottom: addressTypes.map((t) =>
line({
series: source.percent[t.key].cumulative.percent,
name: t.name,
color: t.color,
unit: Unit.percentage,
defaultActive: t.defaultActive,
}),
),
},
],
},
...addressTypes.map((t) => ({
name: t.name,
tree: [
{
name: "Count",
tree: chartsFromCount({
pattern: source.byType[t.key],
metric: `Transactions with ${t.name} ${lowerDir}`,
unit: Unit.count,
color: t.color,
}),
},
{
name: "% of All",
tree: chartsFromPercentCumulative({
pattern: source.percent[t.key],
metric: `Share of Transactions with ${t.name} ${lowerDir}`,
color: t.color,
}),
},
],
})),
],
};
};
/**
* @template {keyof typeof scripts.count} K
* @param {string} groupName
@@ -335,10 +490,10 @@ export function createNetworkSection() {
}),
},
{
name: "Fee Rate",
name: "Effective Fee Rate",
tree: chartsFromBlockAnd6b({
pattern: transactions.fees.feeRate,
metric: "Transaction Fee Rate",
pattern: transactions.fees.effectiveFeeRate,
metric: "Effective Transaction Fee Rate",
unit: Unit.feeRate,
}),
},
@@ -374,6 +529,8 @@ export function createNetworkSection() {
unit: Unit.count,
}),
},
createTxTypeGroup("Input", transactions.inputTypes),
createTxTypeGroup("Output", transactions.outputTypes),
{
name: "Velocity",
title: "Transaction Velocity",

View File

@@ -1099,6 +1099,60 @@ export function chartsFromCount({ pattern, title = (s) => s, metric, unit, color
});
}
/**
* Percent + ratio per rolling window + cumulative — mirrors chartsFromCount for percent data.
* @param {Object} args
* @param {PercentRatioCumulativePattern} args.pattern
* @param {(metric: string) => string} [args.title]
* @param {string} args.metric
* @param {Color} [args.color]
* @returns {PartialOptionsTree}
*/
export function chartsFromPercentCumulative({
pattern,
title = (s) => s,
metric,
color,
}) {
return [
{
name: "Compare",
title: title(metric),
bottom: ROLLING_WINDOWS.flatMap((w) =>
percentRatio({
pattern: pattern[w.key],
name: w.name,
color: w.color,
}),
).concat(
percentRatio({
pattern: pattern.cumulative,
name: "All Time",
color: colors.time.all,
}),
),
},
...ROLLING_WINDOWS.map((w) => ({
name: w.name,
title: title(`${w.title} ${metric}`),
bottom: percentRatio({
pattern: pattern[w.key],
name: w.name,
color: color ?? w.color,
}),
})),
{
name: "Cumulative",
title: title(`Cumulative ${metric}`),
bottom: percentRatio({
pattern: pattern.cumulative,
name: "All Time",
color: color ?? colors.time.all,
}),
},
];
}
/**
* Windowed sums + cumulative for multiple named entries (e.g. transaction versions)
* @param {Object} args

View File

@@ -65,5 +65,6 @@ export function stringToId(s) {
return s
.trim()
.replace(/[ /]+/g, "-")
.toLowerCase();
.toLowerCase()
.replace(/%/g, "%25");
}

View File

@@ -149,6 +149,11 @@
scaleY(0.864);
}
&.skeleton {
pointer-events: none;
.face { color: transparent; }
}
.fees {
display: flex;
flex-direction: column;