website: snapshot

This commit is contained in:
nym21
2026-02-04 17:26:35 +01:00
parent 0d5d7da70f
commit 0437ce1bb4
33 changed files with 5752 additions and 995 deletions

View File

@@ -8,7 +8,7 @@ mod total_addr_count;
mod type_map;
pub use activity::{AddressActivityVecs, AddressTypeToActivityCounts};
pub use address_count::{AddrCountVecs, AddrCountsVecs, AddressTypeToAddressCount};
pub use address_count::{AddrCountsVecs, AddressTypeToAddressCount};
pub use data::AddressesDataVecs;
pub use growth_rate::GrowthRateVecs;
pub use indexes::AnyAddressIndexesVecs;

View File

@@ -56,52 +56,44 @@ impl AddressCohorts {
}))
}
/// Apply a function to each aggregate cohort with its source cohorts.
///
/// Aggregates are the overlapping `ge_amount` and `lt_amount` cohorts; their
/// sources are the `amount_range` cohorts whose filter is included in the
/// aggregate's filter.
fn for_each_aggregate<F>(&mut self, mut f: F) -> Result<()>
where
F: FnMut(&mut AddressCohortVecs, Vec<&AddressCohortVecs>) -> Result<()>,
{
// Shared borrow of the source cohorts; the aggregates below are borrowed
// mutably from sibling fields, so the two borrows are disjoint.
let by_amount_range = &self.0.amount_range;
let pairs: Vec<_> = self
.0
.ge_amount
.iter_mut()
.chain(self.0.lt_amount.iter_mut())
.map(|vecs| {
// Clone the filter so `vecs` can be moved into the tuple while the
// filter is still used by the closure below.
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
// Keep only the source cohorts covered by this aggregate's filter.
.filter(|other| filter.includes(other.filter()))
.collect(),
)
})
.collect();
// Run sequentially: `f` takes the aggregate mutably plus shared sources.
for (vecs, sources) in pairs {
f(vecs, sources)?;
}
Ok(())
}
/// Compute overlapping cohorts from component amount_range cohorts.
///
/// For example, ">=1 BTC" cohort is computed from sum of amount_range cohorts that match.
pub fn compute_overlapping_vecs(
&mut self,
starting_indexes: &ComputeIndexes,
exit: &Exit,
) -> Result<()> {
let by_amount_range = &self.0.amount_range;
// ge_amount cohorts computed from matching amount_range cohorts
[
self.0
.ge_amount
.par_iter_mut()
.map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
})
.collect::<Vec<_>>(),
// lt_amount cohorts computed from matching amount_range cohorts
self.0
.lt_amount
.par_iter_mut()
.map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
})
.collect::<Vec<_>>(),
]
.into_iter()
.flatten()
.try_for_each(|(vecs, components)| {
vecs.compute_from_stateful(starting_indexes, &components, exit)
self.for_each_aggregate(|vecs, sources| {
vecs.compute_from_stateful(starting_indexes, &sources, exit)
})
}
@@ -117,6 +109,20 @@ impl AddressCohorts {
.try_for_each(|v| v.compute_rest_part1(indexes, price, starting_indexes, exit))
}
/// Recompute net_sentiment for aggregate cohorts as weighted average of source cohorts.
pub fn compute_aggregate_net_sentiment(
&mut self,
indexes: &indexes::Vecs,
starting_indexes: &ComputeIndexes,
exit: &Exit,
) -> Result<()> {
self.for_each_aggregate(|aggregate, sources| {
// Borrow each source cohort's metrics for the weighted average.
let source_metrics = sources
.iter()
.map(|cohort| &cohort.metrics)
.collect::<Vec<_>>();
aggregate.metrics.compute_net_sentiment_from_others(
starting_indexes,
&source_metrics,
indexes,
exit,
)
})
}
/// Second phase of post-processing: compute relative metrics.
#[allow(clippy::too_many_arguments)]
pub fn compute_rest_part2<HM, DM>(

View File

@@ -152,70 +152,84 @@ impl UTXOCohorts {
}))
}
/// Apply a function to each aggregate cohort with its source cohorts.
///
/// `all`, `min_age`, `max_age` and `term` aggregate over the `age_range`
/// cohorts; `ge_amount` and `lt_amount` aggregate over the `amount_range`
/// cohorts.
fn for_each_aggregate<F>(&mut self, mut f: F) -> Result<()>
where
F: FnMut(&mut UTXOCohortVecs, Vec<&UTXOCohortVecs>) -> Result<()>,
{
// Shared borrows of the two source groups; the aggregates below are
// borrowed mutably from sibling fields, so the borrows do not overlap.
let by_age_range = &self.0.age_range;
let by_amount_range = &self.0.amount_range;
// Build (aggregate, sources) pairs
// `all` takes every age_range cohort; each other aggregate keeps only
// the source cohorts whose filter is included in its own filter. The
// filter is cloned so `vecs` can be moved into the tuple while the
// filter is still used in the closure.
let pairs: Vec<_> = [(&mut self.0.all, by_age_range.iter().collect::<Vec<_>>())]
.into_iter()
.chain(self.0.min_age.iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect(),
)
}))
.chain(self.0.max_age.iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect(),
)
}))
.chain(self.0.term.iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect(),
)
}))
.chain(self.0.ge_amount.iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect(),
)
}))
.chain(self.0.lt_amount.iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect(),
)
}))
.collect();
// Run sequentially: `f` takes the aggregate mutably plus shared sources.
for (vecs, sources) in pairs {
f(vecs, sources)?;
}
Ok(())
}
/// Compute overlapping cohorts from component age/amount range cohorts.
pub fn compute_overlapping_vecs(
&mut self,
starting_indexes: &ComputeIndexes,
exit: &Exit,
) -> Result<()> {
let by_age_range = &self.0.age_range;
let by_amount_range = &self.0.amount_range;
[(&mut self.0.all, by_age_range.iter().collect::<Vec<_>>())]
.into_par_iter()
.chain(self.0.min_age.par_iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
}))
.chain(self.0.max_age.par_iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
}))
.chain(self.0.term.par_iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_age_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
}))
.chain(self.0.ge_amount.par_iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
}))
.chain(self.0.lt_amount.par_iter_mut().map(|vecs| {
let filter = vecs.filter().clone();
(
vecs,
by_amount_range
.iter()
.filter(|other| filter.includes(other.filter()))
.collect::<Vec<_>>(),
)
}))
.try_for_each(|(vecs, components)| {
vecs.compute_from_stateful(starting_indexes, &components, exit)
})
self.for_each_aggregate(|vecs, sources| {
vecs.compute_from_stateful(starting_indexes, &sources, exit)
})
}
/// First phase of post-processing: compute index transforms.
@@ -230,6 +244,24 @@ impl UTXOCohorts {
.try_for_each(|v| v.compute_rest_part1(indexes, price, starting_indexes, exit))
}
/// Recompute net_sentiment for aggregate cohorts as weighted average of source cohorts.
pub fn compute_aggregate_net_sentiment(
&mut self,
indexes: &indexes::Vecs,
starting_indexes: &ComputeIndexes,
exit: &Exit,
) -> Result<()> {
self.for_each_aggregate(|aggregate, sources| {
// Borrow each source cohort's metrics for the weighted average.
let source_metrics = sources
.iter()
.map(|cohort| &cohort.metrics)
.collect::<Vec<_>>();
aggregate
.metrics
.compute_net_sentiment_from_others(starting_indexes, &source_metrics, indexes, exit)
})
}
/// Second phase of post-processing: compute relative metrics.
#[allow(clippy::too_many_arguments)]
pub fn compute_rest_part2<HM, DM>(

View File

@@ -42,6 +42,10 @@ pub fn compute_rest_part1(
utxo_cohorts.compute_rest_part1(indexes, price, starting_indexes, exit)?;
address_cohorts.compute_rest_part1(indexes, price, starting_indexes, exit)?;
// Recompute net_sentiment for aggregate cohorts as weighted average
utxo_cohorts.compute_aggregate_net_sentiment(indexes, starting_indexes, exit)?;
address_cohorts.compute_aggregate_net_sentiment(indexes, starting_indexes, exit)?;
Ok(())
}

View File

@@ -296,6 +296,45 @@ impl CohortMetrics {
Ok(())
}
/// Compute net_sentiment as capital-weighted average of component cohorts.
///
/// For aggregate cohorts, the simple greed-pain formula produces values outside
/// the range of components due to asymmetric weighting. This recomputes net_sentiment
/// as a proper weighted average using realized_cap as weight.
///
/// No-op when this cohort has no unrealized metrics, or when any source
/// cohort lacks realized or unrealized metrics (a partial average would be
/// skewed).
pub fn compute_net_sentiment_from_others(
&mut self,
starting_indexes: &ComputeIndexes,
others: &[&Self],
indexes: &indexes::Vecs,
exit: &Exit,
) -> Result<()> {
// Cohorts without unrealized metrics have no net_sentiment to recompute.
let Some(unrealized) = self.unrealized.as_mut() else {
return Ok(());
};
// Weights: each source cohort's realized_cap, height-indexed.
let weights: Vec<_> = others
.iter()
.filter_map(|o| Some(&o.realized.as_ref()?.realized_cap.height))
.collect();
// Values: each source cohort's net_sentiment, height-indexed.
let values: Vec<_> = others
.iter()
.filter_map(|o| Some(&o.unrealized.as_ref()?.net_sentiment.height))
.collect();
// Require every source to provide both a weight and a value.
if weights.len() != others.len() || values.len() != others.len() {
return Ok(());
}
// Overwrite the height-level vec, then derive the remaining indexes from it.
unrealized
.net_sentiment
.height
.compute_weighted_average_of_others(starting_indexes.height, &weights, &values, exit)?;
unrealized
.net_sentiment
.compute_rest(indexes, starting_indexes, exit)
}
/// First phase of computed metrics (indexes from height).
pub fn compute_rest_part1(
&mut self,

View File

@@ -5,8 +5,8 @@ use brk_types::{Dollars, Sats, StoredF32, StoredF64, Version};
use vecdb::IterableCloneableVec;
use crate::internal::{
LazyBinaryFromHeightLast, LazyBinaryFromDateLast, NegPercentageDollarsF32, NegRatio32,
PercentageDollarsF32, PercentageSatsF64, Ratio32,
LazyBinaryFromDateLast, LazyBinaryFromHeightLast, NegPercentageDollarsF32,
PercentageDollarsF32, PercentageSatsF64,
};
use super::{ImportConfig, RealizedMetrics, SupplyMetrics, UnrealizedMetrics};
@@ -337,33 +337,33 @@ impl RelativeMetrics {
// === Unrealized vs Own Total Unrealized PnL (lazy, optional) ===
unrealized_profit_rel_to_own_total_unrealized_pnl: extended.then(|| {
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<Ratio32, _, _>(
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<PercentageDollarsF32, _, _>(
&cfg.name("unrealized_profit_rel_to_own_total_unrealized_pnl"),
cfg.version,
cfg.version + v1,
&unrealized.unrealized_profit,
&unrealized.total_unrealized_pnl,
)
}),
unrealized_loss_rel_to_own_total_unrealized_pnl: extended.then(|| {
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<Ratio32, _, _>(
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<PercentageDollarsF32, _, _>(
&cfg.name("unrealized_loss_rel_to_own_total_unrealized_pnl"),
cfg.version,
cfg.version + v1,
&unrealized.unrealized_loss,
&unrealized.total_unrealized_pnl,
)
}),
neg_unrealized_loss_rel_to_own_total_unrealized_pnl: extended.then(|| {
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<NegRatio32, _, _>(
LazyBinaryFromHeightLast::from_computed_height_date_and_binary_block::<NegPercentageDollarsF32, _, _>(
&cfg.name("neg_unrealized_loss_rel_to_own_total_unrealized_pnl"),
cfg.version,
cfg.version + v1,
&unrealized.unrealized_loss,
&unrealized.total_unrealized_pnl,
)
}),
net_unrealized_pnl_rel_to_own_total_unrealized_pnl: extended.then(|| {
LazyBinaryFromHeightLast::from_both_binary_block::<Ratio32, _, _, _, _>(
LazyBinaryFromHeightLast::from_both_binary_block::<PercentageDollarsF32, _, _, _, _>(
&cfg.name("net_unrealized_pnl_rel_to_own_total_unrealized_pnl"),
cfg.version + v1,
cfg.version + v2,
&unrealized.net_unrealized_pnl,
&unrealized.total_unrealized_pnl,
)

View File

@@ -1,6 +1,6 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{CentsSats, CentsSquaredSats, CentsUnsigned, DateIndex, Dollars, Height};
use brk_types::{CentsSats, CentsSquaredSats, CentsUnsigned, DateIndex, Dollars, Height, Version};
use rayon::prelude::*;
use vecdb::{
AnyStoredVec, AnyVec, BytesVec, Exit, GenericStoredVec, ImportableVec, Negate,
@@ -150,7 +150,7 @@ impl UnrealizedMetrics {
let net_sentiment = ComputedFromHeightLast::forced_import(
cfg.db,
&cfg.name("net_sentiment"),
cfg.version,
cfg.version + Version::ONE, // v1: weighted average for aggregate cohorts
cfg.indexes,
)?;

View File

@@ -4,10 +4,10 @@ mod close_price_times_sats;
mod difference_f32;
mod dollar_halve;
mod dollar_identity;
mod dollars_to_sats_fract;
mod dollar_minus;
mod dollar_plus;
mod dollar_times_tenths;
mod dollars_to_sats_fract;
mod f32_identity;
mod half_close_price_times_sats;
mod ohlc;
@@ -19,7 +19,6 @@ mod percentage_u32_f32;
mod percentage_u64_f32;
mod price_times_ratio;
mod ratio32;
mod ratio32_neg;
mod ratio_f32;
mod ratio_u64_f32;
mod return_f32_tenths;
@@ -47,10 +46,10 @@ pub use close_price_times_sats::*;
pub use difference_f32::*;
pub use dollar_halve::*;
pub use dollar_identity::*;
pub use dollars_to_sats_fract::*;
pub use dollar_minus::*;
pub use dollar_plus::*;
pub use dollar_times_tenths::*;
pub use dollars_to_sats_fract::*;
pub use f32_identity::*;
pub use half_close_price_times_sats::*;
pub use ohlc::*;
@@ -61,10 +60,9 @@ pub use percentage_sats_f64::*;
pub use percentage_u32_f32::*;
pub use percentage_u64_f32::*;
pub use price_times_ratio::*;
pub use ratio32::*;
pub use ratio32_neg::*;
pub use ratio_f32::*;
pub use ratio_u64_f32::*;
pub use ratio32::*;
pub use return_f32_tenths::*;
pub use return_i8::*;
pub use return_u16::*;
@@ -79,7 +77,7 @@ pub use sat_to_bitcoin::*;
pub use sats_times_close_price::*;
pub use u16_to_years::*;
pub use u64_plus::*;
pub use volatility_sqrt7::*;
pub use volatility_sqrt30::*;
pub use volatility_sqrt365::*;
pub use volatility_sqrt7::*;
pub use weight_to_fullness::*;

View File

@@ -1,13 +0,0 @@
use brk_types::{Dollars, StoredF32};
use vecdb::BinaryTransform;
/// (Dollars, Dollars) -> -StoredF32 (negated ratio)
/// Computes -(a/b) in one transform to avoid lazy-from-lazy chains.
pub struct NegRatio32;
impl BinaryTransform<Dollars, Dollars, StoredF32> for NegRatio32 {
#[inline(always)]
fn apply(numerator: Dollars, denominator: Dollars) -> StoredF32 {
// Divide first, then negate the stored value.
let ratio = StoredF32::from(numerator / denominator);
-ratio
}
}