global: snapshot

This commit is contained in:
nym21
2026-03-11 13:43:46 +01:00
parent c5d63b3090
commit 984122f394
82 changed files with 3962 additions and 3412 deletions

4
Cargo.lock generated
View File

@@ -529,6 +529,8 @@ dependencies = [
"rayon",
"rlimit",
"rustc-hash",
"schemars",
"serde",
"tracing",
"vecdb",
]
@@ -594,7 +596,9 @@ dependencies = [
"brk_types",
"derive_more",
"jiff",
"parking_lot",
"quickmatch",
"serde_json",
"tokio",
"vecdb",
]

File diff suppressed because it is too large Load Diff

View File

@@ -6,16 +6,16 @@ use super::CohortName;
/// "At least X% loss" threshold names (10 thresholds).
pub const LOSS_NAMES: ByLoss<CohortName> = ByLoss {
breakeven: CohortName::new("loss_ge_breakeven", "<0%", "In Loss (Below Breakeven)"),
_10pct: CohortName::new("loss_ge_10pct", "≥10%L", "10%+ Loss"),
_20pct: CohortName::new("loss_ge_20pct", "≥20%L", "20%+ Loss"),
_30pct: CohortName::new("loss_ge_30pct", "≥30%L", "30%+ Loss"),
_40pct: CohortName::new("loss_ge_40pct", "≥40%L", "40%+ Loss"),
_50pct: CohortName::new("loss_ge_50pct", "≥50%L", "50%+ Loss"),
_60pct: CohortName::new("loss_ge_60pct", "≥60%L", "60%+ Loss"),
_70pct: CohortName::new("loss_ge_70pct", "≥70%L", "70%+ Loss"),
_80pct: CohortName::new("loss_ge_80pct", "≥80%L", "80%+ Loss"),
_90pct: CohortName::new("loss_ge_90pct", "≥90%L", "90%+ Loss"),
breakeven: CohortName::new("utxos_in_loss", "<0%", "In Loss (Below Breakeven)"),
_10pct: CohortName::new("utxos_over_10pct_in_loss", "≥10%L", "10%+ Loss"),
_20pct: CohortName::new("utxos_over_20pct_in_loss", "≥20%L", "20%+ Loss"),
_30pct: CohortName::new("utxos_over_30pct_in_loss", "≥30%L", "30%+ Loss"),
_40pct: CohortName::new("utxos_over_40pct_in_loss", "≥40%L", "40%+ Loss"),
_50pct: CohortName::new("utxos_over_50pct_in_loss", "≥50%L", "50%+ Loss"),
_60pct: CohortName::new("utxos_over_60pct_in_loss", "≥60%L", "60%+ Loss"),
_70pct: CohortName::new("utxos_over_70pct_in_loss", "≥70%L", "70%+ Loss"),
_80pct: CohortName::new("utxos_over_80pct_in_loss", "≥80%L", "80%+ Loss"),
_90pct: CohortName::new("utxos_over_90pct_in_loss", "≥90%L", "90%+ Loss"),
};
/// Number of loss thresholds.

View File

@@ -6,21 +6,21 @@ use super::CohortName;
/// "At least X% profit" threshold names (15 thresholds).
pub const PROFIT_NAMES: ByProfit<CohortName> = ByProfit {
breakeven: CohortName::new("profit_ge_breakeven", "≥0%", "In Profit (Breakeven+)"),
_10pct: CohortName::new("profit_ge_10pct", "≥10%", "10%+ Profit"),
_20pct: CohortName::new("profit_ge_20pct", "≥20%", "20%+ Profit"),
_30pct: CohortName::new("profit_ge_30pct", "≥30%", "30%+ Profit"),
_40pct: CohortName::new("profit_ge_40pct", "≥40%", "40%+ Profit"),
_50pct: CohortName::new("profit_ge_50pct", "≥50%", "50%+ Profit"),
_60pct: CohortName::new("profit_ge_60pct", "≥60%", "60%+ Profit"),
_70pct: CohortName::new("profit_ge_70pct", "≥70%", "70%+ Profit"),
_80pct: CohortName::new("profit_ge_80pct", "≥80%", "80%+ Profit"),
_90pct: CohortName::new("profit_ge_90pct", "≥90%", "90%+ Profit"),
_100pct: CohortName::new("profit_ge_100pct", "≥100%", "100%+ Profit"),
_200pct: CohortName::new("profit_ge_200pct", "≥200%", "200%+ Profit"),
_300pct: CohortName::new("profit_ge_300pct", "≥300%", "300%+ Profit"),
_500pct: CohortName::new("profit_ge_500pct", "≥500%", "500%+ Profit"),
_1000pct: CohortName::new("profit_ge_1000pct", "≥1000%", "1000%+ Profit"),
breakeven: CohortName::new("utxos_in_profit", "≥0%", "In Profit (Breakeven+)"),
_10pct: CohortName::new("utxos_over_10pct_in_profit", "≥10%", "10%+ Profit"),
_20pct: CohortName::new("utxos_over_20pct_in_profit", "≥20%", "20%+ Profit"),
_30pct: CohortName::new("utxos_over_30pct_in_profit", "≥30%", "30%+ Profit"),
_40pct: CohortName::new("utxos_over_40pct_in_profit", "≥40%", "40%+ Profit"),
_50pct: CohortName::new("utxos_over_50pct_in_profit", "≥50%", "50%+ Profit"),
_60pct: CohortName::new("utxos_over_60pct_in_profit", "≥60%", "60%+ Profit"),
_70pct: CohortName::new("utxos_over_70pct_in_profit", "≥70%", "70%+ Profit"),
_80pct: CohortName::new("utxos_over_80pct_in_profit", "≥80%", "80%+ Profit"),
_90pct: CohortName::new("utxos_over_90pct_in_profit", "≥90%", "90%+ Profit"),
_100pct: CohortName::new("utxos_over_100pct_in_profit", "≥100%", "100%+ Profit"),
_200pct: CohortName::new("utxos_over_200pct_in_profit", "≥200%", "200%+ Profit"),
_300pct: CohortName::new("utxos_over_300pct_in_profit", "≥300%", "300%+ Profit"),
_500pct: CohortName::new("utxos_over_500pct_in_profit", "≥500%", "500%+ Profit"),
_1000pct: CohortName::new("utxos_over_1000pct_in_profit", "≥1000%", "1000%+ Profit"),
};
/// Number of profit thresholds.

View File

@@ -22,31 +22,31 @@ pub const PROFITABILITY_BOUNDARY_COUNT: usize = 24;
pub fn compute_profitability_boundaries(spot: Cents) -> [Cents; PROFITABILITY_BOUNDARY_COUNT] {
let s = spot.as_u128();
// Divisors in ascending boundary order (ascending price):
// profit_over_1000: price < spot/11 → boundary at spot*100/1100 = spot/11
// profit_500_to_1000: spot/11 ≤ p < spot/6 → boundary at spot*100/600 = spot/6
// profit_300_to_500: spot/6 ≤ p < spot/4 → boundary at spot*100/400 = spot/4
// profit_200_to_300: spot/4 ≤ p < spot/3 → boundary at spot*100/300 = spot/3
// profit_100_to_200: spot/3 ≤ p < spot/2 → boundary at spot*100/200 = spot/2
// profit_90_to_100: spot/2 ≤ p < spot*100/190 → boundary at spot*100/190
// profit_80_to_90: → boundary at spot*100/180
// profit_70_to_80: → boundary at spot*100/170
// profit_60_to_70: → boundary at spot*100/160
// profit_50_to_60: → boundary at spot*100/150
// profit_40_to_50: → boundary at spot*100/140
// profit_30_to_40: → boundary at spot*100/130
// profit_20_to_30: → boundary at spot*100/120
// profit_10_to_20: → boundary at spot*100/110
// profit_0_to_10: → boundary at spot (= spot*100/100)
// loss_0_to_10: spot ≤ p < spot*100/90 → boundary at spot*100/90
// loss_10_to_20: → boundary at spot*100/80
// loss_20_to_30: → boundary at spot*100/70
// loss_30_to_40: → boundary at spot*100/60
// loss_40_to_50: → boundary at spot*100/50 = spot*2
// loss_50_to_60: → boundary at spot*100/40 = spot*5/2
// loss_60_to_70: → boundary at spot*100/30 = spot*10/3
// loss_70_to_80: → boundary at spot*100/20 = spot*5
// loss_80_to_90: → boundary at spot*100/10 = spot*10
// loss_90_to_100: spot*10 ≤ p (no upper boundary)
// over_1000pct_in_profit: price < spot/11 → boundary at spot*100/1100 = spot/11
// 500pct_to_1000pct_in_profit: spot/11 ≤ p < spot/6 → boundary at spot*100/600 = spot/6
// 300pct_to_500pct_in_profit: spot/6 ≤ p < spot/4 → boundary at spot*100/400 = spot/4
// 200pct_to_300pct_in_profit: spot/4 ≤ p < spot/3 → boundary at spot*100/300 = spot/3
// 100pct_to_200pct_in_profit: spot/3 ≤ p < spot/2 → boundary at spot*100/200 = spot/2
// 90pct_to_100pct_in_profit: spot/2 ≤ p < spot*100/190 → boundary at spot*100/190
// 80pct_to_90pct_in_profit: → boundary at spot*100/180
// 70pct_to_80pct_in_profit: → boundary at spot*100/170
// 60pct_to_70pct_in_profit: → boundary at spot*100/160
// 50pct_to_60pct_in_profit: → boundary at spot*100/150
// 40pct_to_50pct_in_profit: → boundary at spot*100/140
// 30pct_to_40pct_in_profit: → boundary at spot*100/130
// 20pct_to_30pct_in_profit: → boundary at spot*100/120
// 10pct_to_20pct_in_profit: → boundary at spot*100/110
// 0pct_to_10pct_in_profit: → boundary at spot (= spot*100/100)
// 0pct_to_10pct_in_loss: spot ≤ p < spot*100/90 → boundary at spot*100/90
// 10pct_to_20pct_in_loss: → boundary at spot*100/80
// 20pct_to_30pct_in_loss: → boundary at spot*100/70
// 30pct_to_40pct_in_loss: → boundary at spot*100/60
// 40pct_to_50pct_in_loss: → boundary at spot*100/50 = spot*2
// 50pct_to_60pct_in_loss: → boundary at spot*100/40 = spot*5/2
// 60pct_to_70pct_in_loss: → boundary at spot*100/30 = spot*10/3
// 70pct_to_80pct_in_loss: → boundary at spot*100/20 = spot*5
// 80pct_to_90pct_in_loss: → boundary at spot*100/10 = spot*10
// 90pct_to_100pct_in_loss: spot*10 ≤ p (no upper boundary)
let divisors: [u128; PROFITABILITY_BOUNDARY_COUNT] = [
1100, // >1000% profit upper bound (spot/11)
600, // 500-1000% profit upper bound (spot/6)
@@ -83,31 +83,31 @@ pub fn compute_profitability_boundaries(spot: Cents) -> [Cents; PROFITABILITY_BO
/// Profitability range names (25 ranges, from most profitable to most in loss)
pub const PROFITABILITY_RANGE_NAMES: ByProfitabilityRange<CohortName> = ByProfitabilityRange {
profit_over_1000: CohortName::new("profit_over_1000pct", ">1000%", "Over 1000% Profit"),
profit_500_to_1000: CohortName::new("profit_500_to_1000pct", "500-1000%", "500-1000% Profit"),
profit_300_to_500: CohortName::new("profit_300_to_500pct", "300-500%", "300-500% Profit"),
profit_200_to_300: CohortName::new("profit_200_to_300pct", "200-300%", "200-300% Profit"),
profit_100_to_200: CohortName::new("profit_100_to_200pct", "100-200%", "100-200% Profit"),
profit_90_to_100: CohortName::new("profit_90_to_100pct", "90-100%", "90-100% Profit"),
profit_80_to_90: CohortName::new("profit_80_to_90pct", "80-90%", "80-90% Profit"),
profit_70_to_80: CohortName::new("profit_70_to_80pct", "70-80%", "70-80% Profit"),
profit_60_to_70: CohortName::new("profit_60_to_70pct", "60-70%", "60-70% Profit"),
profit_50_to_60: CohortName::new("profit_50_to_60pct", "50-60%", "50-60% Profit"),
profit_40_to_50: CohortName::new("profit_40_to_50pct", "40-50%", "40-50% Profit"),
profit_30_to_40: CohortName::new("profit_30_to_40pct", "30-40%", "30-40% Profit"),
profit_20_to_30: CohortName::new("profit_20_to_30pct", "20-30%", "20-30% Profit"),
profit_10_to_20: CohortName::new("profit_10_to_20pct", "10-20%", "10-20% Profit"),
profit_0_to_10: CohortName::new("profit_0_to_10pct", "0-10%", "0-10% Profit"),
loss_0_to_10: CohortName::new("loss_0_to_10pct", "0-10%L", "0-10% Loss"),
loss_10_to_20: CohortName::new("loss_10_to_20pct", "10-20%L", "10-20% Loss"),
loss_20_to_30: CohortName::new("loss_20_to_30pct", "20-30%L", "20-30% Loss"),
loss_30_to_40: CohortName::new("loss_30_to_40pct", "30-40%L", "30-40% Loss"),
loss_40_to_50: CohortName::new("loss_40_to_50pct", "40-50%L", "40-50% Loss"),
loss_50_to_60: CohortName::new("loss_50_to_60pct", "50-60%L", "50-60% Loss"),
loss_60_to_70: CohortName::new("loss_60_to_70pct", "60-70%L", "60-70% Loss"),
loss_70_to_80: CohortName::new("loss_70_to_80pct", "70-80%L", "70-80% Loss"),
loss_80_to_90: CohortName::new("loss_80_to_90pct", "80-90%L", "80-90% Loss"),
loss_90_to_100: CohortName::new("loss_90_to_100pct", "90-100%L", "90-100% Loss"),
over_1000pct_in_profit: CohortName::new("utxos_over_1000pct_in_profit", ">1000%", "Over 1000% Profit"),
_500pct_to_1000pct_in_profit: CohortName::new("utxos_500pct_to_1000pct_in_profit", "500-1000%", "500-1000% Profit"),
_300pct_to_500pct_in_profit: CohortName::new("utxos_300pct_to_500pct_in_profit", "300-500%", "300-500% Profit"),
_200pct_to_300pct_in_profit: CohortName::new("utxos_200pct_to_300pct_in_profit", "200-300%", "200-300% Profit"),
_100pct_to_200pct_in_profit: CohortName::new("utxos_100pct_to_200pct_in_profit", "100-200%", "100-200% Profit"),
_90pct_to_100pct_in_profit: CohortName::new("utxos_90pct_to_100pct_in_profit", "90-100%", "90-100% Profit"),
_80pct_to_90pct_in_profit: CohortName::new("utxos_80pct_to_90pct_in_profit", "80-90%", "80-90% Profit"),
_70pct_to_80pct_in_profit: CohortName::new("utxos_70pct_to_80pct_in_profit", "70-80%", "70-80% Profit"),
_60pct_to_70pct_in_profit: CohortName::new("utxos_60pct_to_70pct_in_profit", "60-70%", "60-70% Profit"),
_50pct_to_60pct_in_profit: CohortName::new("utxos_50pct_to_60pct_in_profit", "50-60%", "50-60% Profit"),
_40pct_to_50pct_in_profit: CohortName::new("utxos_40pct_to_50pct_in_profit", "40-50%", "40-50% Profit"),
_30pct_to_40pct_in_profit: CohortName::new("utxos_30pct_to_40pct_in_profit", "30-40%", "30-40% Profit"),
_20pct_to_30pct_in_profit: CohortName::new("utxos_20pct_to_30pct_in_profit", "20-30%", "20-30% Profit"),
_10pct_to_20pct_in_profit: CohortName::new("utxos_10pct_to_20pct_in_profit", "10-20%", "10-20% Profit"),
_0pct_to_10pct_in_profit: CohortName::new("utxos_0pct_to_10pct_in_profit", "0-10%", "0-10% Profit"),
_0pct_to_10pct_in_loss: CohortName::new("utxos_0pct_to_10pct_in_loss", "0-10%L", "0-10% Loss"),
_10pct_to_20pct_in_loss: CohortName::new("utxos_10pct_to_20pct_in_loss", "10-20%L", "10-20% Loss"),
_20pct_to_30pct_in_loss: CohortName::new("utxos_20pct_to_30pct_in_loss", "20-30%L", "20-30% Loss"),
_30pct_to_40pct_in_loss: CohortName::new("utxos_30pct_to_40pct_in_loss", "30-40%L", "30-40% Loss"),
_40pct_to_50pct_in_loss: CohortName::new("utxos_40pct_to_50pct_in_loss", "40-50%L", "40-50% Loss"),
_50pct_to_60pct_in_loss: CohortName::new("utxos_50pct_to_60pct_in_loss", "50-60%L", "50-60% Loss"),
_60pct_to_70pct_in_loss: CohortName::new("utxos_60pct_to_70pct_in_loss", "60-70%L", "60-70% Loss"),
_70pct_to_80pct_in_loss: CohortName::new("utxos_70pct_to_80pct_in_loss", "70-80%L", "70-80% Loss"),
_80pct_to_90pct_in_loss: CohortName::new("utxos_80pct_to_90pct_in_loss", "80-90%L", "80-90% Loss"),
_90pct_to_100pct_in_loss: CohortName::new("utxos_90pct_to_100pct_in_loss", "90-100%L", "90-100% Loss"),
};
impl ByProfitabilityRange<CohortName> {
@@ -119,34 +119,34 @@ impl ByProfitabilityRange<CohortName> {
/// 25 profitability range buckets ordered from most profitable to most in loss.
///
/// During the k-way merge (ascending price order), the cursor starts at bucket 0
/// (profit_over_1000, lowest cost basis) and advances as price crosses each boundary.
/// (over_1000pct_in_profit, lowest cost basis) and advances as price crosses each boundary.
#[derive(Default, Clone, Traversable, Serialize)]
pub struct ByProfitabilityRange<T> {
pub profit_over_1000: T,
pub profit_500_to_1000: T,
pub profit_300_to_500: T,
pub profit_200_to_300: T,
pub profit_100_to_200: T,
pub profit_90_to_100: T,
pub profit_80_to_90: T,
pub profit_70_to_80: T,
pub profit_60_to_70: T,
pub profit_50_to_60: T,
pub profit_40_to_50: T,
pub profit_30_to_40: T,
pub profit_20_to_30: T,
pub profit_10_to_20: T,
pub profit_0_to_10: T,
pub loss_0_to_10: T,
pub loss_10_to_20: T,
pub loss_20_to_30: T,
pub loss_30_to_40: T,
pub loss_40_to_50: T,
pub loss_50_to_60: T,
pub loss_60_to_70: T,
pub loss_70_to_80: T,
pub loss_80_to_90: T,
pub loss_90_to_100: T,
pub over_1000pct_in_profit: T,
pub _500pct_to_1000pct_in_profit: T,
pub _300pct_to_500pct_in_profit: T,
pub _200pct_to_300pct_in_profit: T,
pub _100pct_to_200pct_in_profit: T,
pub _90pct_to_100pct_in_profit: T,
pub _80pct_to_90pct_in_profit: T,
pub _70pct_to_80pct_in_profit: T,
pub _60pct_to_70pct_in_profit: T,
pub _50pct_to_60pct_in_profit: T,
pub _40pct_to_50pct_in_profit: T,
pub _30pct_to_40pct_in_profit: T,
pub _20pct_to_30pct_in_profit: T,
pub _10pct_to_20pct_in_profit: T,
pub _0pct_to_10pct_in_profit: T,
pub _0pct_to_10pct_in_loss: T,
pub _10pct_to_20pct_in_loss: T,
pub _20pct_to_30pct_in_loss: T,
pub _30pct_to_40pct_in_loss: T,
pub _40pct_to_50pct_in_loss: T,
pub _50pct_to_60pct_in_loss: T,
pub _60pct_to_70pct_in_loss: T,
pub _70pct_to_80pct_in_loss: T,
pub _80pct_to_90pct_in_loss: T,
pub _90pct_to_100pct_in_loss: T,
}
/// Number of profitability range buckets.
@@ -159,31 +159,31 @@ impl<T> ByProfitabilityRange<T> {
{
let n = &PROFITABILITY_RANGE_NAMES;
Self {
profit_over_1000: create(n.profit_over_1000.id),
profit_500_to_1000: create(n.profit_500_to_1000.id),
profit_300_to_500: create(n.profit_300_to_500.id),
profit_200_to_300: create(n.profit_200_to_300.id),
profit_100_to_200: create(n.profit_100_to_200.id),
profit_90_to_100: create(n.profit_90_to_100.id),
profit_80_to_90: create(n.profit_80_to_90.id),
profit_70_to_80: create(n.profit_70_to_80.id),
profit_60_to_70: create(n.profit_60_to_70.id),
profit_50_to_60: create(n.profit_50_to_60.id),
profit_40_to_50: create(n.profit_40_to_50.id),
profit_30_to_40: create(n.profit_30_to_40.id),
profit_20_to_30: create(n.profit_20_to_30.id),
profit_10_to_20: create(n.profit_10_to_20.id),
profit_0_to_10: create(n.profit_0_to_10.id),
loss_0_to_10: create(n.loss_0_to_10.id),
loss_10_to_20: create(n.loss_10_to_20.id),
loss_20_to_30: create(n.loss_20_to_30.id),
loss_30_to_40: create(n.loss_30_to_40.id),
loss_40_to_50: create(n.loss_40_to_50.id),
loss_50_to_60: create(n.loss_50_to_60.id),
loss_60_to_70: create(n.loss_60_to_70.id),
loss_70_to_80: create(n.loss_70_to_80.id),
loss_80_to_90: create(n.loss_80_to_90.id),
loss_90_to_100: create(n.loss_90_to_100.id),
over_1000pct_in_profit: create(n.over_1000pct_in_profit.id),
_500pct_to_1000pct_in_profit: create(n._500pct_to_1000pct_in_profit.id),
_300pct_to_500pct_in_profit: create(n._300pct_to_500pct_in_profit.id),
_200pct_to_300pct_in_profit: create(n._200pct_to_300pct_in_profit.id),
_100pct_to_200pct_in_profit: create(n._100pct_to_200pct_in_profit.id),
_90pct_to_100pct_in_profit: create(n._90pct_to_100pct_in_profit.id),
_80pct_to_90pct_in_profit: create(n._80pct_to_90pct_in_profit.id),
_70pct_to_80pct_in_profit: create(n._70pct_to_80pct_in_profit.id),
_60pct_to_70pct_in_profit: create(n._60pct_to_70pct_in_profit.id),
_50pct_to_60pct_in_profit: create(n._50pct_to_60pct_in_profit.id),
_40pct_to_50pct_in_profit: create(n._40pct_to_50pct_in_profit.id),
_30pct_to_40pct_in_profit: create(n._30pct_to_40pct_in_profit.id),
_20pct_to_30pct_in_profit: create(n._20pct_to_30pct_in_profit.id),
_10pct_to_20pct_in_profit: create(n._10pct_to_20pct_in_profit.id),
_0pct_to_10pct_in_profit: create(n._0pct_to_10pct_in_profit.id),
_0pct_to_10pct_in_loss: create(n._0pct_to_10pct_in_loss.id),
_10pct_to_20pct_in_loss: create(n._10pct_to_20pct_in_loss.id),
_20pct_to_30pct_in_loss: create(n._20pct_to_30pct_in_loss.id),
_30pct_to_40pct_in_loss: create(n._30pct_to_40pct_in_loss.id),
_40pct_to_50pct_in_loss: create(n._40pct_to_50pct_in_loss.id),
_50pct_to_60pct_in_loss: create(n._50pct_to_60pct_in_loss.id),
_60pct_to_70pct_in_loss: create(n._60pct_to_70pct_in_loss.id),
_70pct_to_80pct_in_loss: create(n._70pct_to_80pct_in_loss.id),
_80pct_to_90pct_in_loss: create(n._80pct_to_90pct_in_loss.id),
_90pct_to_100pct_in_loss: create(n._90pct_to_100pct_in_loss.id),
}
}
@@ -193,92 +193,92 @@ impl<T> ByProfitabilityRange<T> {
{
let n = &PROFITABILITY_RANGE_NAMES;
Ok(Self {
profit_over_1000: create(n.profit_over_1000.id)?,
profit_500_to_1000: create(n.profit_500_to_1000.id)?,
profit_300_to_500: create(n.profit_300_to_500.id)?,
profit_200_to_300: create(n.profit_200_to_300.id)?,
profit_100_to_200: create(n.profit_100_to_200.id)?,
profit_90_to_100: create(n.profit_90_to_100.id)?,
profit_80_to_90: create(n.profit_80_to_90.id)?,
profit_70_to_80: create(n.profit_70_to_80.id)?,
profit_60_to_70: create(n.profit_60_to_70.id)?,
profit_50_to_60: create(n.profit_50_to_60.id)?,
profit_40_to_50: create(n.profit_40_to_50.id)?,
profit_30_to_40: create(n.profit_30_to_40.id)?,
profit_20_to_30: create(n.profit_20_to_30.id)?,
profit_10_to_20: create(n.profit_10_to_20.id)?,
profit_0_to_10: create(n.profit_0_to_10.id)?,
loss_0_to_10: create(n.loss_0_to_10.id)?,
loss_10_to_20: create(n.loss_10_to_20.id)?,
loss_20_to_30: create(n.loss_20_to_30.id)?,
loss_30_to_40: create(n.loss_30_to_40.id)?,
loss_40_to_50: create(n.loss_40_to_50.id)?,
loss_50_to_60: create(n.loss_50_to_60.id)?,
loss_60_to_70: create(n.loss_60_to_70.id)?,
loss_70_to_80: create(n.loss_70_to_80.id)?,
loss_80_to_90: create(n.loss_80_to_90.id)?,
loss_90_to_100: create(n.loss_90_to_100.id)?,
over_1000pct_in_profit: create(n.over_1000pct_in_profit.id)?,
_500pct_to_1000pct_in_profit: create(n._500pct_to_1000pct_in_profit.id)?,
_300pct_to_500pct_in_profit: create(n._300pct_to_500pct_in_profit.id)?,
_200pct_to_300pct_in_profit: create(n._200pct_to_300pct_in_profit.id)?,
_100pct_to_200pct_in_profit: create(n._100pct_to_200pct_in_profit.id)?,
_90pct_to_100pct_in_profit: create(n._90pct_to_100pct_in_profit.id)?,
_80pct_to_90pct_in_profit: create(n._80pct_to_90pct_in_profit.id)?,
_70pct_to_80pct_in_profit: create(n._70pct_to_80pct_in_profit.id)?,
_60pct_to_70pct_in_profit: create(n._60pct_to_70pct_in_profit.id)?,
_50pct_to_60pct_in_profit: create(n._50pct_to_60pct_in_profit.id)?,
_40pct_to_50pct_in_profit: create(n._40pct_to_50pct_in_profit.id)?,
_30pct_to_40pct_in_profit: create(n._30pct_to_40pct_in_profit.id)?,
_20pct_to_30pct_in_profit: create(n._20pct_to_30pct_in_profit.id)?,
_10pct_to_20pct_in_profit: create(n._10pct_to_20pct_in_profit.id)?,
_0pct_to_10pct_in_profit: create(n._0pct_to_10pct_in_profit.id)?,
_0pct_to_10pct_in_loss: create(n._0pct_to_10pct_in_loss.id)?,
_10pct_to_20pct_in_loss: create(n._10pct_to_20pct_in_loss.id)?,
_20pct_to_30pct_in_loss: create(n._20pct_to_30pct_in_loss.id)?,
_30pct_to_40pct_in_loss: create(n._30pct_to_40pct_in_loss.id)?,
_40pct_to_50pct_in_loss: create(n._40pct_to_50pct_in_loss.id)?,
_50pct_to_60pct_in_loss: create(n._50pct_to_60pct_in_loss.id)?,
_60pct_to_70pct_in_loss: create(n._60pct_to_70pct_in_loss.id)?,
_70pct_to_80pct_in_loss: create(n._70pct_to_80pct_in_loss.id)?,
_80pct_to_90pct_in_loss: create(n._80pct_to_90pct_in_loss.id)?,
_90pct_to_100pct_in_loss: create(n._90pct_to_100pct_in_loss.id)?,
})
}
pub fn iter(&self) -> impl Iterator<Item = &T> {
[
&self.profit_over_1000,
&self.profit_500_to_1000,
&self.profit_300_to_500,
&self.profit_200_to_300,
&self.profit_100_to_200,
&self.profit_90_to_100,
&self.profit_80_to_90,
&self.profit_70_to_80,
&self.profit_60_to_70,
&self.profit_50_to_60,
&self.profit_40_to_50,
&self.profit_30_to_40,
&self.profit_20_to_30,
&self.profit_10_to_20,
&self.profit_0_to_10,
&self.loss_0_to_10,
&self.loss_10_to_20,
&self.loss_20_to_30,
&self.loss_30_to_40,
&self.loss_40_to_50,
&self.loss_50_to_60,
&self.loss_60_to_70,
&self.loss_70_to_80,
&self.loss_80_to_90,
&self.loss_90_to_100,
&self.over_1000pct_in_profit,
&self._500pct_to_1000pct_in_profit,
&self._300pct_to_500pct_in_profit,
&self._200pct_to_300pct_in_profit,
&self._100pct_to_200pct_in_profit,
&self._90pct_to_100pct_in_profit,
&self._80pct_to_90pct_in_profit,
&self._70pct_to_80pct_in_profit,
&self._60pct_to_70pct_in_profit,
&self._50pct_to_60pct_in_profit,
&self._40pct_to_50pct_in_profit,
&self._30pct_to_40pct_in_profit,
&self._20pct_to_30pct_in_profit,
&self._10pct_to_20pct_in_profit,
&self._0pct_to_10pct_in_profit,
&self._0pct_to_10pct_in_loss,
&self._10pct_to_20pct_in_loss,
&self._20pct_to_30pct_in_loss,
&self._30pct_to_40pct_in_loss,
&self._40pct_to_50pct_in_loss,
&self._50pct_to_60pct_in_loss,
&self._60pct_to_70pct_in_loss,
&self._70pct_to_80pct_in_loss,
&self._80pct_to_90pct_in_loss,
&self._90pct_to_100pct_in_loss,
]
.into_iter()
}
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut T> {
[
&mut self.profit_over_1000,
&mut self.profit_500_to_1000,
&mut self.profit_300_to_500,
&mut self.profit_200_to_300,
&mut self.profit_100_to_200,
&mut self.profit_90_to_100,
&mut self.profit_80_to_90,
&mut self.profit_70_to_80,
&mut self.profit_60_to_70,
&mut self.profit_50_to_60,
&mut self.profit_40_to_50,
&mut self.profit_30_to_40,
&mut self.profit_20_to_30,
&mut self.profit_10_to_20,
&mut self.profit_0_to_10,
&mut self.loss_0_to_10,
&mut self.loss_10_to_20,
&mut self.loss_20_to_30,
&mut self.loss_30_to_40,
&mut self.loss_40_to_50,
&mut self.loss_50_to_60,
&mut self.loss_60_to_70,
&mut self.loss_70_to_80,
&mut self.loss_80_to_90,
&mut self.loss_90_to_100,
&mut self.over_1000pct_in_profit,
&mut self._500pct_to_1000pct_in_profit,
&mut self._300pct_to_500pct_in_profit,
&mut self._200pct_to_300pct_in_profit,
&mut self._100pct_to_200pct_in_profit,
&mut self._90pct_to_100pct_in_profit,
&mut self._80pct_to_90pct_in_profit,
&mut self._70pct_to_80pct_in_profit,
&mut self._60pct_to_70pct_in_profit,
&mut self._50pct_to_60pct_in_profit,
&mut self._40pct_to_50pct_in_profit,
&mut self._30pct_to_40pct_in_profit,
&mut self._20pct_to_30pct_in_profit,
&mut self._10pct_to_20pct_in_profit,
&mut self._0pct_to_10pct_in_profit,
&mut self._0pct_to_10pct_in_loss,
&mut self._10pct_to_20pct_in_loss,
&mut self._20pct_to_30pct_in_loss,
&mut self._30pct_to_40pct_in_loss,
&mut self._40pct_to_50pct_in_loss,
&mut self._50pct_to_60pct_in_loss,
&mut self._60pct_to_70pct_in_loss,
&mut self._70pct_to_80pct_in_loss,
&mut self._80pct_to_90pct_in_loss,
&mut self._90pct_to_100pct_in_loss,
]
.into_iter()
}
@@ -288,31 +288,31 @@ impl<T> ByProfitabilityRange<T> {
T: Send + Sync,
{
[
&mut self.profit_over_1000,
&mut self.profit_500_to_1000,
&mut self.profit_300_to_500,
&mut self.profit_200_to_300,
&mut self.profit_100_to_200,
&mut self.profit_90_to_100,
&mut self.profit_80_to_90,
&mut self.profit_70_to_80,
&mut self.profit_60_to_70,
&mut self.profit_50_to_60,
&mut self.profit_40_to_50,
&mut self.profit_30_to_40,
&mut self.profit_20_to_30,
&mut self.profit_10_to_20,
&mut self.profit_0_to_10,
&mut self.loss_0_to_10,
&mut self.loss_10_to_20,
&mut self.loss_20_to_30,
&mut self.loss_30_to_40,
&mut self.loss_40_to_50,
&mut self.loss_50_to_60,
&mut self.loss_60_to_70,
&mut self.loss_70_to_80,
&mut self.loss_80_to_90,
&mut self.loss_90_to_100,
&mut self.over_1000pct_in_profit,
&mut self._500pct_to_1000pct_in_profit,
&mut self._300pct_to_500pct_in_profit,
&mut self._200pct_to_300pct_in_profit,
&mut self._100pct_to_200pct_in_profit,
&mut self._90pct_to_100pct_in_profit,
&mut self._80pct_to_90pct_in_profit,
&mut self._70pct_to_80pct_in_profit,
&mut self._60pct_to_70pct_in_profit,
&mut self._50pct_to_60pct_in_profit,
&mut self._40pct_to_50pct_in_profit,
&mut self._30pct_to_40pct_in_profit,
&mut self._20pct_to_30pct_in_profit,
&mut self._10pct_to_20pct_in_profit,
&mut self._0pct_to_10pct_in_profit,
&mut self._0pct_to_10pct_in_loss,
&mut self._10pct_to_20pct_in_loss,
&mut self._20pct_to_30pct_in_loss,
&mut self._30pct_to_40pct_in_loss,
&mut self._40pct_to_50pct_in_loss,
&mut self._50pct_to_60pct_in_loss,
&mut self._60pct_to_70pct_in_loss,
&mut self._70pct_to_80pct_in_loss,
&mut self._80pct_to_90pct_in_loss,
&mut self._90pct_to_100pct_in_loss,
]
.into_par_iter()
}
@@ -320,62 +320,62 @@ impl<T> ByProfitabilityRange<T> {
/// Access as a fixed-size array of references (for indexed access during merge).
pub fn as_array(&self) -> [&T; PROFITABILITY_RANGE_COUNT] {
[
&self.profit_over_1000,
&self.profit_500_to_1000,
&self.profit_300_to_500,
&self.profit_200_to_300,
&self.profit_100_to_200,
&self.profit_90_to_100,
&self.profit_80_to_90,
&self.profit_70_to_80,
&self.profit_60_to_70,
&self.profit_50_to_60,
&self.profit_40_to_50,
&self.profit_30_to_40,
&self.profit_20_to_30,
&self.profit_10_to_20,
&self.profit_0_to_10,
&self.loss_0_to_10,
&self.loss_10_to_20,
&self.loss_20_to_30,
&self.loss_30_to_40,
&self.loss_40_to_50,
&self.loss_50_to_60,
&self.loss_60_to_70,
&self.loss_70_to_80,
&self.loss_80_to_90,
&self.loss_90_to_100,
&self.over_1000pct_in_profit,
&self._500pct_to_1000pct_in_profit,
&self._300pct_to_500pct_in_profit,
&self._200pct_to_300pct_in_profit,
&self._100pct_to_200pct_in_profit,
&self._90pct_to_100pct_in_profit,
&self._80pct_to_90pct_in_profit,
&self._70pct_to_80pct_in_profit,
&self._60pct_to_70pct_in_profit,
&self._50pct_to_60pct_in_profit,
&self._40pct_to_50pct_in_profit,
&self._30pct_to_40pct_in_profit,
&self._20pct_to_30pct_in_profit,
&self._10pct_to_20pct_in_profit,
&self._0pct_to_10pct_in_profit,
&self._0pct_to_10pct_in_loss,
&self._10pct_to_20pct_in_loss,
&self._20pct_to_30pct_in_loss,
&self._30pct_to_40pct_in_loss,
&self._40pct_to_50pct_in_loss,
&self._50pct_to_60pct_in_loss,
&self._60pct_to_70pct_in_loss,
&self._70pct_to_80pct_in_loss,
&self._80pct_to_90pct_in_loss,
&self._90pct_to_100pct_in_loss,
]
}
/// Access as a fixed-size array of mutable references (for indexed access during merge).
pub fn as_array_mut(&mut self) -> [&mut T; PROFITABILITY_RANGE_COUNT] {
[
&mut self.profit_over_1000,
&mut self.profit_500_to_1000,
&mut self.profit_300_to_500,
&mut self.profit_200_to_300,
&mut self.profit_100_to_200,
&mut self.profit_90_to_100,
&mut self.profit_80_to_90,
&mut self.profit_70_to_80,
&mut self.profit_60_to_70,
&mut self.profit_50_to_60,
&mut self.profit_40_to_50,
&mut self.profit_30_to_40,
&mut self.profit_20_to_30,
&mut self.profit_10_to_20,
&mut self.profit_0_to_10,
&mut self.loss_0_to_10,
&mut self.loss_10_to_20,
&mut self.loss_20_to_30,
&mut self.loss_30_to_40,
&mut self.loss_40_to_50,
&mut self.loss_50_to_60,
&mut self.loss_60_to_70,
&mut self.loss_70_to_80,
&mut self.loss_80_to_90,
&mut self.loss_90_to_100,
&mut self.over_1000pct_in_profit,
&mut self._500pct_to_1000pct_in_profit,
&mut self._300pct_to_500pct_in_profit,
&mut self._200pct_to_300pct_in_profit,
&mut self._100pct_to_200pct_in_profit,
&mut self._90pct_to_100pct_in_profit,
&mut self._80pct_to_90pct_in_profit,
&mut self._70pct_to_80pct_in_profit,
&mut self._60pct_to_70pct_in_profit,
&mut self._50pct_to_60pct_in_profit,
&mut self._40pct_to_50pct_in_profit,
&mut self._30pct_to_40pct_in_profit,
&mut self._20pct_to_30pct_in_profit,
&mut self._10pct_to_20pct_in_profit,
&mut self._0pct_to_10pct_in_profit,
&mut self._0pct_to_10pct_in_loss,
&mut self._10pct_to_20pct_in_loss,
&mut self._20pct_to_30pct_in_loss,
&mut self._30pct_to_40pct_in_loss,
&mut self._40pct_to_50pct_in_loss,
&mut self._50pct_to_60pct_in_loss,
&mut self._60pct_to_70pct_in_loss,
&mut self._70pct_to_80pct_in_loss,
&mut self._80pct_to_90pct_in_loss,
&mut self._90pct_to_100pct_in_loss,
]
}
}

View File

@@ -43,7 +43,7 @@ impl Vecs {
)?;
// Compute blocks before next adjustment
self.blocks_before_next_adjustment
self.blocks_before_next
.height
.compute_transform(
starting_indexes.height,
@@ -53,9 +53,9 @@ impl Vecs {
)?;
// Compute days before next adjustment
self.days_before_next_adjustment.height.compute_transform(
self.days_before_next.height.compute_transform(
starting_indexes.height,
&self.blocks_before_next_adjustment.height,
&self.blocks_before_next.height,
|(h, blocks, ..)| (h, (*blocks as f32 / TARGET_BLOCKS_PER_DAY_F32).into()),
exit,
)?;

View File

@@ -33,13 +33,13 @@ impl Vecs {
indexes,
)?,
epoch: ComputedPerBlock::forced_import(db, "difficulty_epoch", version, indexes)?,
blocks_before_next_adjustment: ComputedPerBlock::forced_import(
blocks_before_next: ComputedPerBlock::forced_import(
db,
"blocks_before_next_difficulty_adjustment",
version + v2,
indexes,
)?,
days_before_next_adjustment: ComputedPerBlock::forced_import(
days_before_next: ComputedPerBlock::forced_import(
db,
"days_before_next_difficulty_adjustment",
version + v2,

View File

@@ -9,6 +9,6 @@ pub struct Vecs<M: StorageMode = Rw> {
pub as_hash: ComputedPerBlock<StoredF64, M>,
pub adjustment: PercentPerBlock<BasisPointsSigned32, M>,
pub epoch: ComputedPerBlock<Epoch, M>,
pub blocks_before_next_adjustment: ComputedPerBlock<StoredU32, M>,
pub days_before_next_adjustment: ComputedPerBlock<StoredF32, M>,
pub blocks_before_next: ComputedPerBlock<StoredU32, M>,
pub days_before_next: ComputedPerBlock<StoredF32, M>,
}

View File

@@ -20,16 +20,16 @@ impl Vecs {
exit,
)?;
self.blocks_before_next_halving.height.compute_transform(
self.blocks_before_next.height.compute_transform(
starting_indexes.height,
&indexes.height.identity,
|(h, ..)| (h, StoredU32::from(h.left_before_next_halving())),
exit,
)?;
self.days_before_next_halving.height.compute_transform(
self.days_before_next.height.compute_transform(
starting_indexes.height,
&self.blocks_before_next_halving.height,
&self.blocks_before_next.height,
|(h, blocks, ..)| (h, (*blocks as f32 / TARGET_BLOCKS_PER_DAY_F32).into()),
exit,
)?;

View File

@@ -15,13 +15,13 @@ impl Vecs {
Ok(Self {
epoch: ComputedPerBlock::forced_import(db, "halving_epoch", version, indexes)?,
blocks_before_next_halving: ComputedPerBlock::forced_import(
blocks_before_next: ComputedPerBlock::forced_import(
db,
"blocks_before_next_halving",
version + v2,
indexes,
)?,
days_before_next_halving: ComputedPerBlock::forced_import(
days_before_next: ComputedPerBlock::forced_import(
db,
"days_before_next_halving",
version + v2,

View File

@@ -6,6 +6,6 @@ use crate::internal::ComputedPerBlock;
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub epoch: ComputedPerBlock<Halving, M>,
pub blocks_before_next_halving: ComputedPerBlock<StoredU32, M>,
pub days_before_next_halving: ComputedPerBlock<StoredF32, M>,
pub blocks_before_next: ComputedPerBlock<StoredU32, M>,
pub days_before_next: ComputedPerBlock<StoredF32, M>,
}

View File

@@ -23,7 +23,7 @@ impl Vecs {
self.hodl_bank.compute_cumulative_transformed_binary(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&self.vocdd_median_1y,
|price, median| StoredF64::from(f64::from(price) - f64::from(median)),
exit,
@@ -31,7 +31,7 @@ impl Vecs {
self.value.height.compute_divide(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&self.hodl_bank,
exit,
)?;

View File

@@ -27,7 +27,7 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |vec| {
vec.compute_multiply(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&coinblocks_destroyed.raw.height,
exit,
)?;
@@ -38,7 +38,7 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |vec| {
vec.compute_multiply(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&activity.coinblocks_created.raw.height,
exit,
)?;
@@ -49,7 +49,7 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |vec| {
vec.compute_multiply(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&activity.coinblocks_stored.raw.height,
exit,
)?;
@@ -63,7 +63,7 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |vec| {
vec.compute_transform3(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&coindays_destroyed.raw.height,
circulating_supply,
|(i, price, cdd, supply, _): (_, Dollars, StoredF64, Bitcoin, _)| {

View File

@@ -144,42 +144,42 @@ pub(crate) fn process_blocks(
let first_p2a_vec = indexer
.vecs
.addresses
.first_p2aaddressindex
.p2a.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pk33_vec = indexer
.vecs
.addresses
.first_p2pk33addressindex
.p2pk33.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pk65_vec = indexer
.vecs
.addresses
.first_p2pk65addressindex
.p2pk65.first_index
.collect_range_at(start_usize, end_usize);
let first_p2pkh_vec = indexer
.vecs
.addresses
.first_p2pkhaddressindex
.p2pkh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2sh_vec = indexer
.vecs
.addresses
.first_p2shaddressindex
.p2sh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2tr_vec = indexer
.vecs
.addresses
.first_p2traddressindex
.p2tr.first_index
.collect_range_at(start_usize, end_usize);
let first_p2wpkh_vec = indexer
.vecs
.addresses
.first_p2wpkhaddressindex
.p2wpkh.first_index
.collect_range_at(start_usize, end_usize);
let first_p2wsh_vec = indexer
.vecs
.addresses
.first_p2wshaddressindex
.p2wsh.first_index
.collect_range_at(start_usize, end_usize);
// Track running totals - recover from previous height if resuming

View File

@@ -182,7 +182,7 @@ impl RealizedMinimal {
self.nupl.bps.height.compute_transform2(
starting_indexes.height,
&prices.price.cents.height,
&prices.spot.cents.height,
&self.price.cents.height,
|(i, price, realized_price, ..)| {
let p = price.as_u128();

View File

@@ -10,11 +10,11 @@ use crate::distribution::metrics::{ImportConfig, UnrealizedCore};
/// Extended relative metrics for own total unrealized PnL (extended only).
#[derive(Traversable)]
pub struct RelativeExtendedOwnPnl<M: StorageMode = Rw> {
#[traversable(wrap = "unrealized/profit", rename = "rel_to_own_gross_pnl")]
#[traversable(wrap = "unrealized/profit", rename = "rel_to_own_gross")]
pub unrealized_profit_rel_to_own_gross_pnl: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "unrealized/loss", rename = "rel_to_own_gross_pnl")]
#[traversable(wrap = "unrealized/loss", rename = "rel_to_own_gross")]
pub unrealized_loss_rel_to_own_gross_pnl: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "unrealized/net_pnl", rename = "rel_to_own_gross_pnl")]
#[traversable(wrap = "unrealized/net_pnl", rename = "rel_to_own_gross")]
pub net_unrealized_pnl_rel_to_own_gross_pnl: PercentPerBlock<BasisPointsSigned32, M>,
}

View File

@@ -10,11 +10,11 @@ use crate::distribution::metrics::{ImportConfig, SupplyCore};
/// Relative-to-all metrics (not present for the "all" cohort itself).
#[derive(Traversable)]
pub struct RelativeToAll<M: StorageMode = Rw> {
#[traversable(wrap = "supply", rename = "rel_to_circulating_supply")]
#[traversable(wrap = "supply", rename = "rel_to_circulating")]
pub supply_rel_to_circulating_supply: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "supply/in_profit", rename = "rel_to_circulating_supply")]
#[traversable(wrap = "supply/in_profit", rename = "rel_to_circulating")]
pub supply_in_profit_rel_to_circulating_supply: PercentPerBlock<BasisPoints16, M>,
#[traversable(wrap = "supply/in_loss", rename = "rel_to_circulating_supply")]
#[traversable(wrap = "supply/in_loss", rename = "rel_to_circulating")]
pub supply_in_loss_rel_to_circulating_supply: PercentPerBlock<BasisPoints16, M>,
}

View File

@@ -136,7 +136,7 @@ impl UnrealizedFull {
starting_indexes.height,
&self.inner.investor_cap_in_loss_raw,
&self.inner.invested_capital_in_loss_raw,
&prices.price.cents.height,
&prices.spot.cents.height,
|(h, investor_cap, invested_cap, spot, ..)| {
if invested_cap.inner() == 0 {
return (h, Cents::ZERO);
@@ -152,7 +152,7 @@ impl UnrealizedFull {
starting_indexes.height,
&self.inner.investor_cap_in_profit_raw,
&self.inner.invested_capital_in_profit_raw,
&prices.price.cents.height,
&prices.spot.cents.height,
|(h, investor_cap, invested_cap, spot, ..)| {
if invested_cap.inner() == 0 {
return (h, Cents::ZERO);

View File

@@ -3,11 +3,10 @@ mod block;
pub mod cohorts;
pub mod compute;
pub mod metrics;
mod range_map;
mod state;
mod vecs;
pub use range_map::RangeMap;
pub use brk_types::RangeMap;
pub use vecs::Vecs;
pub const DB_NAME: &str = "distribution";

View File

@@ -45,8 +45,10 @@ pub struct AddressMetricsVecs<M: StorageMode = Rw> {
pub total: TotalAddrCountVecs<M>,
pub new: NewAddrCountVecs<M>,
pub delta: DeltaVecs<M>,
#[traversable(wrap = "indexes", rename = "funded")]
pub funded_index:
LazyVecFrom1<FundedAddressIndex, FundedAddressIndex, FundedAddressIndex, FundedAddressData>,
#[traversable(wrap = "indexes", rename = "empty")]
pub empty_index:
LazyVecFrom1<EmptyAddressIndex, EmptyAddressIndex, EmptyAddressIndex, EmptyAddressData>,
}
@@ -59,9 +61,13 @@ pub struct Vecs<M: StorageMode = Rw> {
pub states_path: PathBuf,
pub supply_state: M::Stored<BytesVec<Height, SupplyState>>,
#[traversable(wrap = "addresses", rename = "indexes")]
pub any_address_indexes: AnyAddressIndexesVecs<M>,
#[traversable(wrap = "addresses", rename = "data")]
pub addresses_data: AddressesDataVecs<M>,
#[traversable(wrap = "cohorts", rename = "utxo")]
pub utxo_cohorts: UTXOCohorts<M>,
#[traversable(wrap = "cohorts", rename = "address")]
pub address_cohorts: AddressCohorts<M>,
pub coinblocks_destroyed: ComputedPerBlockCumulative<StoredF64, M>,
pub addresses: AddressMetricsVecs<M>,
@@ -213,7 +219,7 @@ impl Vecs {
exit: &Exit,
) -> Result<()> {
let cache_target_len = prices
.price
.spot
.cents
.height
.len()
@@ -225,7 +231,7 @@ impl Vecs {
self.cached_price_range_max.truncate(cache_target_len);
} else if cache_target_len > cache_current_len {
let new_prices = prices
.price
.spot
.cents
.height
.collect_range_at(cache_current_len, cache_target_len);

View File

@@ -94,7 +94,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2pk33addressindex",
version,
indexer.vecs.addresses.p2pk33bytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2pk33.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -102,7 +102,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2pk65addressindex",
version,
indexer.vecs.addresses.p2pk65bytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2pk65.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -110,7 +110,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2pkhaddressindex",
version,
indexer.vecs.addresses.p2pkhbytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2pkh.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -118,7 +118,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2shaddressindex",
version,
indexer.vecs.addresses.p2shbytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2sh.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -126,7 +126,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2traddressindex",
version,
indexer.vecs.addresses.p2trbytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2tr.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -134,7 +134,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2wpkhaddressindex",
version,
indexer.vecs.addresses.p2wpkhbytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2wpkh.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -142,7 +142,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2wshaddressindex",
version,
indexer.vecs.addresses.p2wshbytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2wsh.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -150,7 +150,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2aaddressindex",
version,
indexer.vecs.addresses.p2abytes.read_only_boxed_clone(),
indexer.vecs.addresses.p2a.bytes.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -158,7 +158,7 @@ impl Vecs {
identity: LazyVecFrom1::init(
"p2msoutputindex",
version,
indexer.vecs.scripts.p2ms_to_txindex.read_only_boxed_clone(),
indexer.vecs.scripts.p2ms.to_txindex.read_only_boxed_clone(),
|index, _| index,
),
},
@@ -169,7 +169,7 @@ impl Vecs {
indexer
.vecs
.scripts
.empty_to_txindex
.empty.to_txindex
.read_only_boxed_clone(),
|index, _| index,
),
@@ -181,7 +181,7 @@ impl Vecs {
indexer
.vecs
.scripts
.unknown_to_txindex
.unknown.to_txindex
.read_only_boxed_clone(),
|index, _| index,
),
@@ -193,7 +193,7 @@ impl Vecs {
indexer
.vecs
.scripts
.opreturn_to_txindex
.opreturn.to_txindex
.read_only_boxed_clone(),
|index, _| index,
),

View File

@@ -59,7 +59,7 @@ impl Amount<Height> {
self.cents.compute_binary::<Sats, Cents, SatsToCents>(
max_from,
&self.sats,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;
Ok(())

View File

@@ -68,7 +68,7 @@ impl AmountPerBlock {
self.cents.compute_binary::<Sats, Cents, SatsToCents>(
max_from,
&self.sats.height,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;
Ok(())

View File

@@ -62,7 +62,7 @@ impl AmountPerBlockCumulative {
self.base.cents.compute_binary::<Sats, Cents, SatsToCents>(
max_from,
&self.base.sats.height,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;

View File

@@ -60,7 +60,7 @@ impl AmountPerBlockCumulativeSum {
.compute_binary::<Sats, Cents, SatsToCents>(
max_from,
&self.base.sats.height,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;

View File

@@ -61,7 +61,7 @@ impl AmountPerBlockFull {
.compute_binary::<Sats, Cents, SatsToCents>(
max_from,
&self.base.sats.height,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;

View File

@@ -1,7 +1,7 @@
use brk_traversable::Traversable;
use brk_types::{
Day1, Day3, Epoch, FromCoarserIndex, Halving, Height, Hour1, Hour4, Hour12,
Minute10, Minute30, Month1, Month3, Month6, Version, Week1, Year1, Year10,
Day1, Day3, Epoch, FromCoarserIndex, Halving, Height, Hour1, Hour4, Hour12, Minute10, Minute30,
Month1, Month3, Month6, Version, Week1, Year1, Year10,
};
use derive_more::{Deref, DerefMut};
use schemars::JsonSchema;
@@ -70,7 +70,7 @@ where
};
}
fn for_each_range_from_coarser<
fn for_each_range<
I: VecIndex,
O: VecValue,
S1I: VecIndex + FromCoarserIndex<I>,
@@ -103,7 +103,7 @@ where
version,
height_source.clone(),
indexes.$idx.identity.read_only_boxed_clone(),
for_each_range_from_coarser,
for_each_range,
)
};
}

View File

@@ -71,7 +71,7 @@ impl PriceWithRatioPerBlock {
F: FnMut(&mut EagerVec<PcoVec<Height, Cents>>) -> Result<()>,
{
compute_price(&mut self.cents.height)?;
self.compute_ratio(starting_indexes, &prices.price.cents.height, exit)
self.compute_ratio(starting_indexes, &prices.spot.cents.height, exit)
}
}
@@ -104,7 +104,7 @@ impl PriceWithRatioExtendedPerBlock {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let close_price = &prices.price.cents.height;
let close_price = &prices.spot.cents.height;
self.base.compute_ratio(starting_indexes, close_price, exit)?;
self.percentiles.compute(
starting_indexes,

View File

@@ -13,17 +13,17 @@ impl Vecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
self.price.cents.height.compute_all_time_high(
self.high.cents.height.compute_all_time_high(
starting_indexes.height,
&prices.price.cents.height,
&prices.spot.cents.height,
exit,
)?;
let mut ath_ts: Option<Timestamp> = None;
self.days_since.height.compute_transform3(
starting_indexes.height,
&self.price.cents.height,
&prices.price.cents.height,
&self.high.cents.height,
&prices.spot.cents.height,
&blocks.time.timestamp_monotonic,
|(i, ath, price, ts, slf)| {
if ath_ts.is_none() {
@@ -68,8 +68,8 @@ impl Vecs {
self.drawdown.compute_drawdown(
starting_indexes.height,
&prices.price.cents.height,
&self.price.cents.height,
&prices.spot.cents.height,
&self.high.cents.height,
exit,
)?;

View File

@@ -18,7 +18,7 @@ impl Vecs {
) -> Result<Self> {
let v = version + VERSION;
let price = Price::forced_import(db, "price_ath", v, indexes)?;
let high = Price::forced_import(db, "price_ath", v, indexes)?;
let max_days_between =
ComputedPerBlock::forced_import(db, "max_days_between_price_ath", v, indexes)?;
@@ -41,7 +41,7 @@ impl Vecs {
let drawdown = PercentPerBlock::forced_import(db, "price_drawdown", v, indexes)?;
Ok(Self {
price,
high,
drawdown,
days_since,
years_since,

View File

@@ -6,7 +6,7 @@ use crate::internal::{ComputedPerBlock, DerivedResolutions, PercentPerBlock, Pri
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub price: Price<ComputedPerBlock<Cents, M>>,
pub high: Price<ComputedPerBlock<Cents, M>>,
pub drawdown: PercentPerBlock<BasisPointsSigned16, M>,
pub days_since: ComputedPerBlock<StoredF32, M>,
pub years_since: DerivedResolutions<StoredF32, StoredF32>,

View File

@@ -98,7 +98,7 @@ impl Vecs {
{
returns.compute_binary::<Cents, Cents, RatioDiffCentsBps32>(
starting_indexes.height,
&prices.price.cents.height,
&prices.spot.cents.height,
&average_price.cents.height,
exit,
)?;
@@ -155,7 +155,7 @@ impl Vecs {
{
returns.compute_binary::<Cents, Cents, RatioDiffCentsBps32>(
starting_indexes.height,
&prices.price.cents.height,
&prices.spot.cents.height,
&lookback_price.cents.height,
exit,
)?;
@@ -253,7 +253,7 @@ impl Vecs {
{
returns.compute_binary::<Cents, Cents, RatioDiffCentsBps32>(
starting_indexes.height,
&prices.price.cents.height,
&prices.spot.cents.height,
&average_price.cents.height,
exit,
)?;

View File

@@ -13,7 +13,7 @@ impl Vecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let price = &prices.price.cents.height;
let price = &prices.spot.cents.height;
for (price_lookback, days) in self.price_lookback.iter_mut_with_days() {
let window_starts = blocks.lookback.start_vec(days as usize);

View File

@@ -13,7 +13,7 @@ impl Vecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let close = &prices.price.cents.height;
let close = &prices.spot.cents.height;
for (sma, period) in [
(&mut self.sma._1w, 7),

View File

@@ -13,7 +13,7 @@ impl Vecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let price = &prices.price.cents.height;
let price = &prices.spot.cents.height;
for (min_vec, max_vec, starts) in [
(

View File

@@ -22,7 +22,7 @@ impl Vecs {
{
returns.compute_binary::<Dollars, Dollars, RatioDiffDollarsBps32>(
starting_indexes.height,
&prices.price.usd.height,
&prices.spot.usd.height,
&lookback_price.usd.height,
exit,
)?;

View File

@@ -24,7 +24,7 @@ impl Vecs {
) -> Result<()> {
// Stochastic Oscillator: K = (close - low_2w) / (high_2w - low_2w), stored as ratio (0-1)
{
let price = &prices.price.usd.height;
let price = &prices.spot.usd.height;
self.stoch_k.bps.height.compute_transform3(
starting_indexes.height,
price,

View File

@@ -16,7 +16,7 @@ pub(super) fn compute(
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let close = &prices.price.usd.height;
let close = &prices.spot.usd.height;
let ws_fast = blocks.lookback.start_vec(fast_days);
let ws_slow = blocks.lookback.start_vec(slow_days);
let ws_signal = blocks.lookback.start_vec(signal_days);

View File

@@ -92,7 +92,7 @@ impl Vecs {
)?;
self.subsidy.compute(prices, starting_indexes.height, exit)?;
self.unclaimed_rewards.compute(
self.unclaimed.compute(
starting_indexes.height,
&window_starts,
prices,

View File

@@ -23,7 +23,7 @@ impl Vecs {
)?,
subsidy: AmountPerBlockCumulative::forced_import(db, "subsidy", version, indexes)?,
fees: AmountPerBlockFull::forced_import(db, "fees", version, indexes)?,
unclaimed_rewards: AmountPerBlockCumulativeSum::forced_import(
unclaimed: AmountPerBlockCumulativeSum::forced_import(
db,
"unclaimed_rewards",
version,

View File

@@ -12,13 +12,17 @@ pub struct Vecs<M: StorageMode = Rw> {
pub coinbase: AmountPerBlockCumulativeSum<M>,
pub subsidy: AmountPerBlockCumulative<M>,
pub fees: AmountPerBlockFull<M>,
pub unclaimed_rewards: AmountPerBlockCumulativeSum<M>,
pub unclaimed: AmountPerBlockCumulativeSum<M>,
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(rename = "fee_dominance")]
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance_rolling: PercentRollingWindows<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance: PercentPerBlock<BasisPoints16, M>,
#[traversable(rename = "subsidy_dominance")]
#[traversable(wrap = "subsidy", rename = "dominance")]
pub subsidy_dominance_rolling: PercentRollingWindows<BasisPoints16, M>,
#[traversable(wrap = "subsidy", rename = "sma_1y")]
pub subsidy_sma_1y: FiatPerBlock<Cents, M>,
#[traversable(wrap = "fees", rename = "ratio_multiple")]
pub fee_ratio_multiple: RatioRollingWindows<BasisPoints32, M>,
}

View File

@@ -31,7 +31,7 @@ impl Vecs {
)
})?;
self.utxo_count.height.compute_transform3(
self.unspent.height.compute_transform3(
starting_indexes.height,
&self.total.full.cumulative,
&inputs_count.full.cumulative,

View File

@@ -21,7 +21,7 @@ impl Vecs {
version,
indexes,
)?,
utxo_count: ComputedPerBlock::forced_import(
unspent: ComputedPerBlock::forced_import(
db,
"exact_utxo_count",
version,

View File

@@ -7,5 +7,5 @@ use crate::internal::{ComputedPerBlock, ComputedPerBlockAggregated};
#[derive(Traversable)]
pub struct Vecs<M: StorageMode = Rw> {
pub total: ComputedPerBlockAggregated<StoredU64, M>,
pub utxo_count: ComputedPerBlock<StoredU64, M>,
pub unspent: ComputedPerBlock<StoredU64, M>,
}

View File

@@ -120,14 +120,14 @@ impl Vecs {
let first_txoutindex = indexer.vecs.transactions.first_txoutindex.reader();
let outputtype = indexer.vecs.outputs.outputtype.reader();
let typeindex = indexer.vecs.outputs.typeindex.reader();
let p2pk65 = indexer.vecs.addresses.p2pk65bytes.reader();
let p2pk33 = indexer.vecs.addresses.p2pk33bytes.reader();
let p2pkh = indexer.vecs.addresses.p2pkhbytes.reader();
let p2sh = indexer.vecs.addresses.p2shbytes.reader();
let p2wpkh = indexer.vecs.addresses.p2wpkhbytes.reader();
let p2wsh = indexer.vecs.addresses.p2wshbytes.reader();
let p2tr = indexer.vecs.addresses.p2trbytes.reader();
let p2a = indexer.vecs.addresses.p2abytes.reader();
let p2pk65 = indexer.vecs.addresses.p2pk65.bytes.reader();
let p2pk33 = indexer.vecs.addresses.p2pk33.bytes.reader();
let p2pkh = indexer.vecs.addresses.p2pkh.bytes.reader();
let p2sh = indexer.vecs.addresses.p2sh.bytes.reader();
let p2wpkh = indexer.vecs.addresses.p2wpkh.bytes.reader();
let p2wsh = indexer.vecs.addresses.p2wsh.bytes.reader();
let p2tr = indexer.vecs.addresses.p2tr.bytes.reader();
let p2a = indexer.vecs.addresses.p2a.bytes.reader();
let unknown = self.pools.get_unknown();

View File

@@ -73,7 +73,7 @@ impl Vecs {
// Reorg: truncate to starting_indexes
let truncate_to = self
.price
.spot
.cents
.height
.len()
@@ -101,7 +101,7 @@ impl Vecs {
let config = Config::default();
let committed = self.spot.cents.height.len();
let prev_cents = self
.price
.spot
.cents
.height
.collect_one_at(committed - 1)
@@ -234,7 +234,7 @@ impl<M: StorageMode> Vecs<M> {
let config = Config::default();
let height = indexer.vecs.blocks.timestamp.len();
let last_cents = self
.price
.spot
.cents
.height
.collect_one_at(self.spot.cents.height.len() - 1)

View File

@@ -20,8 +20,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2aaddressindex,
&indexer.vecs.addresses.p2abytes,
&indexer.vecs.addresses.p2a.first_index,
&indexer.vecs.addresses.p2a.bytes,
exit,
)?)
})?;
@@ -30,8 +30,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.first_p2msoutputindex,
&indexer.vecs.scripts.p2ms_to_txindex,
&indexer.vecs.scripts.p2ms.first_index,
&indexer.vecs.scripts.p2ms.to_txindex,
exit,
)?)
})?;
@@ -40,8 +40,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2pk33addressindex,
&indexer.vecs.addresses.p2pk33bytes,
&indexer.vecs.addresses.p2pk33.first_index,
&indexer.vecs.addresses.p2pk33.bytes,
exit,
)?)
})?;
@@ -50,8 +50,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2pk65addressindex,
&indexer.vecs.addresses.p2pk65bytes,
&indexer.vecs.addresses.p2pk65.first_index,
&indexer.vecs.addresses.p2pk65.bytes,
exit,
)?)
})?;
@@ -60,8 +60,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2pkhaddressindex,
&indexer.vecs.addresses.p2pkhbytes,
&indexer.vecs.addresses.p2pkh.first_index,
&indexer.vecs.addresses.p2pkh.bytes,
exit,
)?)
})?;
@@ -70,8 +70,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2shaddressindex,
&indexer.vecs.addresses.p2shbytes,
&indexer.vecs.addresses.p2sh.first_index,
&indexer.vecs.addresses.p2sh.bytes,
exit,
)?)
})?;
@@ -80,8 +80,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2traddressindex,
&indexer.vecs.addresses.p2trbytes,
&indexer.vecs.addresses.p2tr.first_index,
&indexer.vecs.addresses.p2tr.bytes,
exit,
)?)
})?;
@@ -90,8 +90,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2wpkhaddressindex,
&indexer.vecs.addresses.p2wpkhbytes,
&indexer.vecs.addresses.p2wpkh.first_index,
&indexer.vecs.addresses.p2wpkh.bytes,
exit,
)?)
})?;
@@ -100,8 +100,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.addresses.first_p2wshaddressindex,
&indexer.vecs.addresses.p2wshbytes,
&indexer.vecs.addresses.p2wsh.first_index,
&indexer.vecs.addresses.p2wsh.bytes,
exit,
)?)
})?;
@@ -110,8 +110,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.first_opreturnindex,
&indexer.vecs.scripts.opreturn_to_txindex,
&indexer.vecs.scripts.opreturn.first_index,
&indexer.vecs.scripts.opreturn.to_txindex,
exit,
)?)
})?;
@@ -120,8 +120,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.first_unknownoutputindex,
&indexer.vecs.scripts.unknown_to_txindex,
&indexer.vecs.scripts.unknown.first_index,
&indexer.vecs.scripts.unknown.to_txindex,
exit,
)?)
})?;
@@ -130,8 +130,8 @@ impl Vecs {
.compute(starting_indexes.height, &window_starts, exit, |v| {
Ok(v.compute_count_from_indexes(
starting_indexes.height,
&indexer.vecs.scripts.first_emptyoutputindex,
&indexer.vecs.scripts.empty_to_txindex,
&indexer.vecs.scripts.empty.first_index,
&indexer.vecs.scripts.empty.to_txindex,
exit,
)?)
})?;

View File

@@ -60,7 +60,7 @@ impl Vecs {
// 2. Compute unspendable supply = opreturn + unclaimed_rewards + genesis (at height 0)
// Get reference to opreturn height vec for computing unspendable
let opreturn_height = &self.opreturn.base.sats.height;
let unclaimed_height = &mining.rewards.unclaimed_rewards.base.sats.height;
let unclaimed_height = &mining.rewards.unclaimed.base.sats.height;
self.unspendable.compute(
starting_indexes.height,

View File

@@ -58,13 +58,13 @@ impl Vecs {
)?;
// Annualized volume: rolling 1y sum of per-block sent volume
self.annualized_volume.sats.height.compute_rolling_sum(
self.annualized.sats.height.compute_rolling_sum(
starting_indexes.height,
&blocks.lookback._1y,
&self.sent_sum.sats,
exit,
)?;
self.annualized_volume
self.annualized
.compute(prices, starting_indexes.height, exit)?;
self.tx_per_sec

View File

@@ -23,7 +23,7 @@ impl Vecs {
version,
indexes,
)?,
annualized_volume: AmountPerBlock::forced_import(
annualized: AmountPerBlock::forced_import(
db,
"annualized_volume",
version,

View File

@@ -8,7 +8,7 @@ use crate::internal::{AmountPerBlock, AmountPerBlockRolling, ComputedPerBlock};
pub struct Vecs<M: StorageMode = Rw> {
pub sent_sum: AmountPerBlockRolling<M>,
pub received_sum: AmountPerBlockRolling<M>,
pub annualized_volume: AmountPerBlock<M>,
pub annualized: AmountPerBlock<M>,
pub tx_per_sec: ComputedPerBlock<StoredF32, M>,
pub outputs_per_sec: ComputedPerBlock<StoredF32, M>,
pub inputs_per_sec: ComputedPerBlock<StoredF32, M>,

View File

@@ -117,7 +117,7 @@ pub enum Error {
#[error("{0}")]
OutOfRange(String),
#[error("Parse error: {0}")]
#[error("{0}")]
Parse(String),
#[error("Internal error: {0}")]
@@ -136,6 +136,9 @@ pub enum Error {
#[error("No metrics specified")]
NoMetrics,
#[error("No data available")]
NoData,
#[error("Request weight {requested} exceeds maximum {max}")]
WeightExceeded { requested: usize, max: usize },

View File

@@ -20,6 +20,8 @@ brk_store = { workspace = true }
brk_types = { workspace = true }
brk_traversable = { workspace = true }
fjall = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true }
tracing = { workspace = true }
rayon = { workspace = true }
rlimit = "0.11.0"

View File

@@ -30,40 +30,40 @@ impl IndexesExt for Indexes {
.first_txoutindex
.checked_push(height, self.txoutindex)?;
vecs.scripts
.first_emptyoutputindex
.empty.first_index
.checked_push(height, self.emptyoutputindex)?;
vecs.scripts
.first_p2msoutputindex
.p2ms.first_index
.checked_push(height, self.p2msoutputindex)?;
vecs.scripts
.first_opreturnindex
.opreturn.first_index
.checked_push(height, self.opreturnindex)?;
vecs.addresses
.first_p2aaddressindex
.p2a.first_index
.checked_push(height, self.p2aaddressindex)?;
vecs.scripts
.first_unknownoutputindex
.unknown.first_index
.checked_push(height, self.unknownoutputindex)?;
vecs.addresses
.first_p2pk33addressindex
.p2pk33.first_index
.checked_push(height, self.p2pk33addressindex)?;
vecs.addresses
.first_p2pk65addressindex
.p2pk65.first_index
.checked_push(height, self.p2pk65addressindex)?;
vecs.addresses
.first_p2pkhaddressindex
.p2pkh.first_index
.checked_push(height, self.p2pkhaddressindex)?;
vecs.addresses
.first_p2shaddressindex
.p2sh.first_index
.checked_push(height, self.p2shaddressindex)?;
vecs.addresses
.first_p2traddressindex
.p2tr.first_index
.checked_push(height, self.p2traddressindex)?;
vecs.addresses
.first_p2wpkhaddressindex
.p2wpkh.first_index
.checked_push(height, self.p2wpkhaddressindex)?;
vecs.addresses
.first_p2wshaddressindex
.p2wsh.first_index
.checked_push(height, self.p2wshaddressindex)?;
Ok(())
@@ -98,68 +98,68 @@ impl IndexesExt for Indexes {
};
let emptyoutputindex = starting_index(
&vecs.scripts.first_emptyoutputindex,
&vecs.scripts.empty_to_txindex,
&vecs.scripts.empty.first_index,
&vecs.scripts.empty.to_txindex,
starting_height,
)?;
let p2msoutputindex = starting_index(
&vecs.scripts.first_p2msoutputindex,
&vecs.scripts.p2ms_to_txindex,
&vecs.scripts.p2ms.first_index,
&vecs.scripts.p2ms.to_txindex,
starting_height,
)?;
let opreturnindex = starting_index(
&vecs.scripts.first_opreturnindex,
&vecs.scripts.opreturn_to_txindex,
&vecs.scripts.opreturn.first_index,
&vecs.scripts.opreturn.to_txindex,
starting_height,
)?;
let p2pk33addressindex = starting_index(
&vecs.addresses.first_p2pk33addressindex,
&vecs.addresses.p2pk33bytes,
&vecs.addresses.p2pk33.first_index,
&vecs.addresses.p2pk33.bytes,
starting_height,
)?;
let p2pk65addressindex = starting_index(
&vecs.addresses.first_p2pk65addressindex,
&vecs.addresses.p2pk65bytes,
&vecs.addresses.p2pk65.first_index,
&vecs.addresses.p2pk65.bytes,
starting_height,
)?;
let p2pkhaddressindex = starting_index(
&vecs.addresses.first_p2pkhaddressindex,
&vecs.addresses.p2pkhbytes,
&vecs.addresses.p2pkh.first_index,
&vecs.addresses.p2pkh.bytes,
starting_height,
)?;
let p2shaddressindex = starting_index(
&vecs.addresses.first_p2shaddressindex,
&vecs.addresses.p2shbytes,
&vecs.addresses.p2sh.first_index,
&vecs.addresses.p2sh.bytes,
starting_height,
)?;
let p2traddressindex = starting_index(
&vecs.addresses.first_p2traddressindex,
&vecs.addresses.p2trbytes,
&vecs.addresses.p2tr.first_index,
&vecs.addresses.p2tr.bytes,
starting_height,
)?;
let p2wpkhaddressindex = starting_index(
&vecs.addresses.first_p2wpkhaddressindex,
&vecs.addresses.p2wpkhbytes,
&vecs.addresses.p2wpkh.first_index,
&vecs.addresses.p2wpkh.bytes,
starting_height,
)?;
let p2wshaddressindex = starting_index(
&vecs.addresses.first_p2wshaddressindex,
&vecs.addresses.p2wshbytes,
&vecs.addresses.p2wsh.first_index,
&vecs.addresses.p2wsh.bytes,
starting_height,
)?;
let p2aaddressindex = starting_index(
&vecs.addresses.first_p2aaddressindex,
&vecs.addresses.p2abytes,
&vecs.addresses.p2a.first_index,
&vecs.addresses.p2a.bytes,
starting_height,
)?;
@@ -182,8 +182,8 @@ impl IndexesExt for Indexes {
)?;
let unknownoutputindex = starting_index(
&vecs.scripts.first_unknownoutputindex,
&vecs.scripts.unknown_to_txindex,
&vecs.scripts.unknown.first_index,
&vecs.scripts.unknown.to_txindex,
starting_height,
)?;

View File

@@ -168,25 +168,25 @@ pub(super) fn finalize_outputs(
match outputtype {
OutputType::P2MS => {
scripts
.p2ms_to_txindex
.p2ms.to_txindex
.checked_push(indexes.p2msoutputindex, txindex)?;
indexes.p2msoutputindex.copy_then_increment()
}
OutputType::OpReturn => {
scripts
.opreturn_to_txindex
.opreturn.to_txindex
.checked_push(indexes.opreturnindex, txindex)?;
indexes.opreturnindex.copy_then_increment()
}
OutputType::Empty => {
scripts
.empty_to_txindex
.empty.to_txindex
.checked_push(indexes.emptyoutputindex, txindex)?;
indexes.emptyoutputindex.copy_then_increment()
}
OutputType::Unknown => {
scripts
.unknown_to_txindex
.unknown.to_txindex
.checked_push(indexes.unknownoutputindex, txindex)?;
indexes.unknownoutputindex.copy_then_increment()
}

View File

@@ -39,14 +39,14 @@ impl Readers {
txoutindex_to_outputtype: vecs.outputs.outputtype.reader(),
txoutindex_to_typeindex: vecs.outputs.typeindex.reader(),
addressbytes: AddressReaders {
p2pk65: vecs.addresses.p2pk65bytes.reader(),
p2pk33: vecs.addresses.p2pk33bytes.reader(),
p2pkh: vecs.addresses.p2pkhbytes.reader(),
p2sh: vecs.addresses.p2shbytes.reader(),
p2wpkh: vecs.addresses.p2wpkhbytes.reader(),
p2wsh: vecs.addresses.p2wshbytes.reader(),
p2tr: vecs.addresses.p2trbytes.reader(),
p2a: vecs.addresses.p2abytes.reader(),
p2pk65: vecs.addresses.p2pk65.bytes.reader(),
p2pk33: vecs.addresses.p2pk33.bytes.reader(),
p2pkh: vecs.addresses.p2pkh.bytes.reader(),
p2sh: vecs.addresses.p2sh.bytes.reader(),
p2wpkh: vecs.addresses.p2wpkh.bytes.reader(),
p2wsh: vecs.addresses.p2wsh.bytes.reader(),
p2tr: vecs.addresses.p2tr.bytes.reader(),
p2a: vecs.addresses.p2a.bytes.reader(),
},
}
}

View File

@@ -7,34 +7,32 @@ use brk_types::{
P2WSHBytes, TypeIndex, Version,
};
use rayon::prelude::*;
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::{
AnyStoredVec, BytesVec, Database, ImportableVec, PcoVec, ReadableVec, Rw, Stamp, StorageMode,
VecIndex, WritableVec,
AnyStoredVec, BytesVec, BytesVecValue, Database, Formattable, ImportableVec, PcoVec,
PcoVecValue, ReadableVec, Rw, Stamp, StorageMode, VecIndex, WritableVec,
};
use crate::parallel_import;
use crate::readers::AddressReaders;
#[derive(Traversable)]
pub struct AddressTypeVecs<I: VecIndex + PcoVecValue + Formattable + Serialize + JsonSchema, B: BytesVecValue + Formattable + Serialize + JsonSchema, M: StorageMode = Rw> {
pub first_index: M::Stored<PcoVec<Height, I>>,
pub bytes: M::Stored<BytesVec<I, B>>,
}
#[derive(Traversable)]
pub struct AddressesVecs<M: StorageMode = Rw> {
// Height to first address index (per address type)
pub first_p2pk65addressindex: M::Stored<PcoVec<Height, P2PK65AddressIndex>>,
pub first_p2pk33addressindex: M::Stored<PcoVec<Height, P2PK33AddressIndex>>,
pub first_p2pkhaddressindex: M::Stored<PcoVec<Height, P2PKHAddressIndex>>,
pub first_p2shaddressindex: M::Stored<PcoVec<Height, P2SHAddressIndex>>,
pub first_p2wpkhaddressindex: M::Stored<PcoVec<Height, P2WPKHAddressIndex>>,
pub first_p2wshaddressindex: M::Stored<PcoVec<Height, P2WSHAddressIndex>>,
pub first_p2traddressindex: M::Stored<PcoVec<Height, P2TRAddressIndex>>,
pub first_p2aaddressindex: M::Stored<PcoVec<Height, P2AAddressIndex>>,
// Address index to bytes (per address type)
pub p2pk65bytes: M::Stored<BytesVec<P2PK65AddressIndex, P2PK65Bytes>>,
pub p2pk33bytes: M::Stored<BytesVec<P2PK33AddressIndex, P2PK33Bytes>>,
pub p2pkhbytes: M::Stored<BytesVec<P2PKHAddressIndex, P2PKHBytes>>,
pub p2shbytes: M::Stored<BytesVec<P2SHAddressIndex, P2SHBytes>>,
pub p2wpkhbytes: M::Stored<BytesVec<P2WPKHAddressIndex, P2WPKHBytes>>,
pub p2wshbytes: M::Stored<BytesVec<P2WSHAddressIndex, P2WSHBytes>>,
pub p2trbytes: M::Stored<BytesVec<P2TRAddressIndex, P2TRBytes>>,
pub p2abytes: M::Stored<BytesVec<P2AAddressIndex, P2ABytes>>,
pub p2pk65: AddressTypeVecs<P2PK65AddressIndex, P2PK65Bytes, M>,
pub p2pk33: AddressTypeVecs<P2PK33AddressIndex, P2PK33Bytes, M>,
pub p2pkh: AddressTypeVecs<P2PKHAddressIndex, P2PKHBytes, M>,
pub p2sh: AddressTypeVecs<P2SHAddressIndex, P2SHBytes, M>,
pub p2wpkh: AddressTypeVecs<P2WPKHAddressIndex, P2WPKHBytes, M>,
pub p2wsh: AddressTypeVecs<P2WSHAddressIndex, P2WSHBytes, M>,
pub p2tr: AddressTypeVecs<P2TRAddressIndex, P2TRBytes, M>,
pub p2a: AddressTypeVecs<P2AAddressIndex, P2ABytes, M>,
}
impl AddressesVecs {
@@ -75,22 +73,14 @@ impl AddressesVecs {
p2abytes = BytesVec::forced_import(db, "p2abytes", version),
};
Ok(Self {
first_p2pk65addressindex,
first_p2pk33addressindex,
first_p2pkhaddressindex,
first_p2shaddressindex,
first_p2wpkhaddressindex,
first_p2wshaddressindex,
first_p2traddressindex,
first_p2aaddressindex,
p2pk65bytes,
p2pk33bytes,
p2pkhbytes,
p2shbytes,
p2wpkhbytes,
p2wshbytes,
p2trbytes,
p2abytes,
p2pk65: AddressTypeVecs { first_index: first_p2pk65addressindex, bytes: p2pk65bytes },
p2pk33: AddressTypeVecs { first_index: first_p2pk33addressindex, bytes: p2pk33bytes },
p2pkh: AddressTypeVecs { first_index: first_p2pkhaddressindex, bytes: p2pkhbytes },
p2sh: AddressTypeVecs { first_index: first_p2shaddressindex, bytes: p2shbytes },
p2wpkh: AddressTypeVecs { first_index: first_p2wpkhaddressindex, bytes: p2wpkhbytes },
p2wsh: AddressTypeVecs { first_index: first_p2wshaddressindex, bytes: p2wshbytes },
p2tr: AddressTypeVecs { first_index: first_p2traddressindex, bytes: p2trbytes },
p2a: AddressTypeVecs { first_index: first_p2aaddressindex, bytes: p2abytes },
})
}
@@ -108,59 +98,59 @@ impl AddressesVecs {
p2aaddressindex: P2AAddressIndex,
stamp: Stamp,
) -> Result<()> {
self.first_p2pk65addressindex
self.p2pk65.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2pk33addressindex
self.p2pk33.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2pkhaddressindex
self.p2pkh.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2shaddressindex
self.p2sh.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2wpkhaddressindex
self.p2wpkh.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2wshaddressindex
self.p2wsh.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2traddressindex
self.p2tr.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2aaddressindex
self.p2a.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.p2pk65bytes
self.p2pk65.bytes
.truncate_if_needed_with_stamp(p2pk65addressindex, stamp)?;
self.p2pk33bytes
self.p2pk33.bytes
.truncate_if_needed_with_stamp(p2pk33addressindex, stamp)?;
self.p2pkhbytes
self.p2pkh.bytes
.truncate_if_needed_with_stamp(p2pkhaddressindex, stamp)?;
self.p2shbytes
self.p2sh.bytes
.truncate_if_needed_with_stamp(p2shaddressindex, stamp)?;
self.p2wpkhbytes
self.p2wpkh.bytes
.truncate_if_needed_with_stamp(p2wpkhaddressindex, stamp)?;
self.p2wshbytes
self.p2wsh.bytes
.truncate_if_needed_with_stamp(p2wshaddressindex, stamp)?;
self.p2trbytes
self.p2tr.bytes
.truncate_if_needed_with_stamp(p2traddressindex, stamp)?;
self.p2abytes
self.p2a.bytes
.truncate_if_needed_with_stamp(p2aaddressindex, stamp)?;
Ok(())
}
pub fn par_iter_mut_any(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
[
&mut self.first_p2pk65addressindex as &mut dyn AnyStoredVec,
&mut self.first_p2pk33addressindex,
&mut self.first_p2pkhaddressindex,
&mut self.first_p2shaddressindex,
&mut self.first_p2wpkhaddressindex,
&mut self.first_p2wshaddressindex,
&mut self.first_p2traddressindex,
&mut self.first_p2aaddressindex,
&mut self.p2pk65bytes,
&mut self.p2pk33bytes,
&mut self.p2pkhbytes,
&mut self.p2shbytes,
&mut self.p2wpkhbytes,
&mut self.p2wshbytes,
&mut self.p2trbytes,
&mut self.p2abytes,
&mut self.p2pk65.first_index as &mut dyn AnyStoredVec,
&mut self.p2pk33.first_index,
&mut self.p2pkh.first_index,
&mut self.p2sh.first_index,
&mut self.p2wpkh.first_index,
&mut self.p2wsh.first_index,
&mut self.p2tr.first_index,
&mut self.p2a.first_index,
&mut self.p2pk65.bytes,
&mut self.p2pk33.bytes,
&mut self.p2pkh.bytes,
&mut self.p2sh.bytes,
&mut self.p2wpkh.bytes,
&mut self.p2wsh.bytes,
&mut self.p2tr.bytes,
&mut self.p2a.bytes,
]
.into_par_iter()
}
@@ -175,35 +165,35 @@ impl AddressesVecs {
) -> Option<AddressBytes> {
match addresstype {
OutputType::P2PK65 => self
.p2pk65bytes
.p2pk65.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2pk65)
.map(AddressBytes::from),
OutputType::P2PK33 => self
.p2pk33bytes
.p2pk33.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2pk33)
.map(AddressBytes::from),
OutputType::P2PKH => self
.p2pkhbytes
.p2pkh.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2pkh)
.map(AddressBytes::from),
OutputType::P2SH => self
.p2shbytes
.p2sh.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2sh)
.map(AddressBytes::from),
OutputType::P2WPKH => self
.p2wpkhbytes
.p2wpkh.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2wpkh)
.map(AddressBytes::from),
OutputType::P2WSH => self
.p2wshbytes
.p2wsh.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2wsh)
.map(AddressBytes::from),
OutputType::P2TR => self
.p2trbytes
.p2tr.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2tr)
.map(AddressBytes::from),
OutputType::P2A => self
.p2abytes
.p2a.bytes
.get_pushed_or_read(typeindex.into(), &readers.p2a)
.map(AddressBytes::from),
_ => unreachable!("get_bytes_by_type called with non-address type"),
@@ -212,14 +202,14 @@ impl AddressesVecs {
pub fn push_bytes_if_needed(&mut self, index: TypeIndex, bytes: AddressBytes) -> Result<()> {
match bytes {
AddressBytes::P2PK65(bytes) => self.p2pk65bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2PK33(bytes) => self.p2pk33bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2PKH(bytes) => self.p2pkhbytes.checked_push(index.into(), bytes)?,
AddressBytes::P2SH(bytes) => self.p2shbytes.checked_push(index.into(), bytes)?,
AddressBytes::P2WPKH(bytes) => self.p2wpkhbytes.checked_push(index.into(), bytes)?,
AddressBytes::P2WSH(bytes) => self.p2wshbytes.checked_push(index.into(), bytes)?,
AddressBytes::P2TR(bytes) => self.p2trbytes.checked_push(index.into(), bytes)?,
AddressBytes::P2A(bytes) => self.p2abytes.checked_push(index.into(), bytes)?,
AddressBytes::P2PK65(bytes) => self.p2pk65.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2PK33(bytes) => self.p2pk33.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2PKH(bytes) => self.p2pkh.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2SH(bytes) => self.p2sh.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2WPKH(bytes) => self.p2wpkh.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2WSH(bytes) => self.p2wsh.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2TR(bytes) => self.p2tr.bytes.checked_push(index.into(), bytes)?,
AddressBytes::P2A(bytes) => self.p2a.bytes.checked_push(index.into(), bytes)?,
};
Ok(())
}
@@ -233,10 +223,10 @@ impl AddressesVecs {
height: Height,
) -> Result<Box<dyn Iterator<Item = AddressHash> + '_>> {
macro_rules! make_iter {
($height_vec:expr, $bytes_vec:expr) => {{
match $height_vec.collect_one(height) {
($addr:expr) => {{
match $addr.first_index.collect_one(height) {
Some(mut index) => {
let reader = $bytes_vec.reader();
let reader = $addr.bytes.reader();
Ok(Box::new(std::iter::from_fn(move || {
reader.try_get(index.to_usize()).map(|typedbytes| {
let bytes = AddressBytes::from(typedbytes);
@@ -255,14 +245,14 @@ impl AddressesVecs {
}
match address_type {
OutputType::P2PK65 => make_iter!(self.first_p2pk65addressindex, self.p2pk65bytes),
OutputType::P2PK33 => make_iter!(self.first_p2pk33addressindex, self.p2pk33bytes),
OutputType::P2PKH => make_iter!(self.first_p2pkhaddressindex, self.p2pkhbytes),
OutputType::P2SH => make_iter!(self.first_p2shaddressindex, self.p2shbytes),
OutputType::P2WPKH => make_iter!(self.first_p2wpkhaddressindex, self.p2wpkhbytes),
OutputType::P2WSH => make_iter!(self.first_p2wshaddressindex, self.p2wshbytes),
OutputType::P2TR => make_iter!(self.first_p2traddressindex, self.p2trbytes),
OutputType::P2A => make_iter!(self.first_p2aaddressindex, self.p2abytes),
OutputType::P2PK65 => make_iter!(self.p2pk65),
OutputType::P2PK33 => make_iter!(self.p2pk33),
OutputType::P2PKH => make_iter!(self.p2pkh),
OutputType::P2SH => make_iter!(self.p2sh),
OutputType::P2WPKH => make_iter!(self.p2wpkh),
OutputType::P2WSH => make_iter!(self.p2wsh),
OutputType::P2TR => make_iter!(self.p2tr),
OutputType::P2A => make_iter!(self.p2a),
_ => Ok(Box::new(std::iter::empty())),
}
}

View File

@@ -32,10 +32,15 @@ pub struct Vecs<M: StorageMode = Rw> {
#[traversable(skip)]
db: Database,
pub blocks: BlocksVecs<M>,
#[traversable(wrap = "transactions", rename = "raw")]
pub transactions: TransactionsVecs<M>,
#[traversable(wrap = "inputs", rename = "raw")]
pub inputs: InputsVecs<M>,
#[traversable(wrap = "outputs", rename = "raw")]
pub outputs: OutputsVecs<M>,
#[traversable(wrap = "addresses", rename = "raw")]
pub addresses: AddressesVecs<M>,
#[traversable(wrap = "scripts", rename = "raw")]
pub scripts: ScriptsVecs<M>,
}

View File

@@ -4,22 +4,27 @@ use brk_types::{
EmptyOutputIndex, Height, OpReturnIndex, P2MSOutputIndex, TxIndex, UnknownOutputIndex, Version,
};
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Database, ImportableVec, PcoVec, Rw, Stamp, StorageMode, WritableVec};
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::{
AnyStoredVec, Database, Formattable, ImportableVec, PcoVec, PcoVecValue, Rw, Stamp,
StorageMode, VecIndex, WritableVec,
};
use crate::parallel_import;
#[derive(Traversable)]
pub struct ScriptTypeVecs<I: VecIndex + PcoVecValue + Formattable + Serialize + JsonSchema, M: StorageMode = Rw> {
pub first_index: M::Stored<PcoVec<Height, I>>,
pub to_txindex: M::Stored<PcoVec<I, TxIndex>>,
}
#[derive(Traversable)]
pub struct ScriptsVecs<M: StorageMode = Rw> {
// Height to first output index (per output type)
pub first_emptyoutputindex: M::Stored<PcoVec<Height, EmptyOutputIndex>>,
pub first_opreturnindex: M::Stored<PcoVec<Height, OpReturnIndex>>,
pub first_p2msoutputindex: M::Stored<PcoVec<Height, P2MSOutputIndex>>,
pub first_unknownoutputindex: M::Stored<PcoVec<Height, UnknownOutputIndex>>,
// Output index to txindex (per output type)
pub empty_to_txindex: M::Stored<PcoVec<EmptyOutputIndex, TxIndex>>,
pub opreturn_to_txindex: M::Stored<PcoVec<OpReturnIndex, TxIndex>>,
pub p2ms_to_txindex: M::Stored<PcoVec<P2MSOutputIndex, TxIndex>>,
pub unknown_to_txindex: M::Stored<PcoVec<UnknownOutputIndex, TxIndex>>,
pub empty: ScriptTypeVecs<EmptyOutputIndex, M>,
pub opreturn: ScriptTypeVecs<OpReturnIndex, M>,
pub p2ms: ScriptTypeVecs<P2MSOutputIndex, M>,
pub unknown: ScriptTypeVecs<UnknownOutputIndex, M>,
}
impl ScriptsVecs {
@@ -44,14 +49,10 @@ impl ScriptsVecs {
unknownoutputindex_to_txindex = PcoVec::forced_import(db, "txindex", version),
};
Ok(Self {
first_emptyoutputindex,
first_opreturnindex,
first_p2msoutputindex,
first_unknownoutputindex,
empty_to_txindex: emptyoutputindex_to_txindex,
opreturn_to_txindex: opreturnindex_to_txindex,
p2ms_to_txindex: p2msoutputindex_to_txindex,
unknown_to_txindex: unknownoutputindex_to_txindex,
empty: ScriptTypeVecs { first_index: first_emptyoutputindex, to_txindex: emptyoutputindex_to_txindex },
opreturn: ScriptTypeVecs { first_index: first_opreturnindex, to_txindex: opreturnindex_to_txindex },
p2ms: ScriptTypeVecs { first_index: first_p2msoutputindex, to_txindex: p2msoutputindex_to_txindex },
unknown: ScriptTypeVecs { first_index: first_unknownoutputindex, to_txindex: unknownoutputindex_to_txindex },
})
}
@@ -64,35 +65,35 @@ impl ScriptsVecs {
unknownoutputindex: UnknownOutputIndex,
stamp: Stamp,
) -> Result<()> {
self.first_emptyoutputindex
self.empty.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_opreturnindex
self.opreturn.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_p2msoutputindex
self.p2ms.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.first_unknownoutputindex
self.unknown.first_index
.truncate_if_needed_with_stamp(height, stamp)?;
self.empty_to_txindex
self.empty.to_txindex
.truncate_if_needed_with_stamp(emptyoutputindex, stamp)?;
self.opreturn_to_txindex
self.opreturn.to_txindex
.truncate_if_needed_with_stamp(opreturnindex, stamp)?;
self.p2ms_to_txindex
self.p2ms.to_txindex
.truncate_if_needed_with_stamp(p2msoutputindex, stamp)?;
self.unknown_to_txindex
self.unknown.to_txindex
.truncate_if_needed_with_stamp(unknownoutputindex, stamp)?;
Ok(())
}
pub fn par_iter_mut_any(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
[
&mut self.first_emptyoutputindex as &mut dyn AnyStoredVec,
&mut self.first_opreturnindex,
&mut self.first_p2msoutputindex,
&mut self.first_unknownoutputindex,
&mut self.empty_to_txindex,
&mut self.opreturn_to_txindex,
&mut self.p2ms_to_txindex,
&mut self.unknown_to_txindex,
&mut self.empty.first_index as &mut dyn AnyStoredVec,
&mut self.opreturn.first_index,
&mut self.p2ms.first_index,
&mut self.unknown.first_index,
&mut self.empty.to_txindex,
&mut self.opreturn.to_txindex,
&mut self.p2ms.to_txindex,
&mut self.unknown.to_txindex,
]
.into_par_iter()
}

View File

@@ -23,7 +23,9 @@ brk_traversable = { workspace = true }
brk_types = { workspace = true }
derive_more = { workspace = true }
jiff = { workspace = true }
parking_lot = { workspace = true }
quickmatch = { path = "../../../quickmatch" }
# quickmatch = "0.3.1"
tokio = { workspace = true, optional = true }
serde_json = { workspace = true }
vecdb = { workspace = true }

View File

@@ -1,27 +1,33 @@
use std::collections::BTreeMap;
use std::{collections::BTreeMap, sync::LazyLock};
use brk_error::{Error, Result};
use brk_traversable::TreeNode;
use brk_types::{
DetailedMetricCount, Etag, Format, Index, IndexInfo, LegacyValue, Limit, Metric, MetricData,
MetricOutput, MetricOutputLegacy, MetricSelection, Output, OutputLegacy, PaginatedMetrics,
Pagination, PaginationIndex, Version,
Date, DetailedMetricCount, Epoch, Etag, Format, Halving, Height, Index, IndexInfo, LegacyValue,
Limit, Metric, MetricData, MetricInfo, MetricOutput, MetricOutputLegacy, MetricSelection,
Output, OutputLegacy, PaginatedMetrics, Pagination, PaginationIndex, RangeIndex, RangeMap,
SearchQuery, Timestamp, Version,
};
use vecdb::AnyExportableVec;
use parking_lot::RwLock;
use vecdb::{AnyExportableVec, ReadableVec};
use crate::{
Query,
vecs::{IndexToVec, MetricToVec},
};
/// Monotonic block timestamps → height. Lazily extended as new blocks are indexed.
static HEIGHT_BY_MONOTONIC_TIMESTAMP: LazyLock<RwLock<RangeMap<Timestamp, Height>>> =
LazyLock::new(|| RwLock::new(RangeMap::default()));
/// Estimated bytes per column header
const CSV_HEADER_BYTES_PER_COL: usize = 10;
/// Estimated bytes per cell value
const CSV_CELL_BYTES: usize = 15;
impl Query {
pub fn match_metric(&self, metric: &Metric, limit: Limit) -> Vec<&'static str> {
self.vecs().matches(metric, limit)
pub fn search_metrics(&self, query: &SearchQuery) -> Vec<&'static str> {
self.vecs().matches(&query.q, query.limit)
}
pub fn metric_not_found_error(&self, metric: &Metric) -> Error {
@@ -40,7 +46,7 @@ impl Query {
// Metric doesn't exist, suggest alternatives
let matches = self
.match_metric(metric, Limit::DEFAULT)
.vecs().matches(metric, Limit::DEFAULT)
.into_iter()
.map(|s| s.to_string())
.collect();
@@ -101,6 +107,15 @@ impl Query {
Ok(csv)
}
/// Returns the latest value for a single metric as a JSON value.
pub fn latest(&self, metric: &Metric, index: Index) -> Result<serde_json::Value> {
let vec = self
.vecs()
.get(metric, index)
.ok_or_else(|| self.metric_not_found_error(metric))?;
vec.last_json_value().ok_or(Error::NoData)
}
/// Search for vecs matching the given metrics and index.
/// Returns error if no metrics requested or any requested metric is not found.
pub fn search(&self, params: &MetricSelection) -> Result<Vec<&'static dyn AnyExportableVec>> {
@@ -129,21 +144,29 @@ impl Query {
let total = vecs.iter().map(|v| v.len()).min().unwrap_or(0);
let version: Version = vecs.iter().map(|v| v.version()).sum();
let index = params.index;
let start = params
.start()
.map(|s| vecs.iter().map(|v| v.i64_to_usize(s)).min().unwrap_or(0))
.unwrap_or(0);
let start = match params.start() {
Some(ri) => {
let i = self.range_index_to_i64(ri, index)?;
vecs.iter().map(|v| v.i64_to_usize(i)).min().unwrap_or(0)
}
None => 0,
};
let end = params
.end_for_len(total)
.map(|e| {
let end = match params.end() {
Some(ri) => {
let i = self.range_index_to_i64(ri, index)?;
vecs.iter()
.map(|v| v.i64_to_usize(e))
.map(|v| v.i64_to_usize(i))
.min()
.unwrap_or(total)
})
.unwrap_or(total);
}
None => params
.limit()
.map(|l| (start + *l).min(total))
.unwrap_or(total),
};
let weight = Self::weight(&vecs, Some(start as i64), Some(end as i64));
if weight > max_weight {
@@ -211,6 +234,25 @@ impl Query {
})
}
/// Format a resolved query as raw data (just the JSON array, no MetricData wrapper).
pub fn format_raw(&self, resolved: ResolvedQuery) -> Result<MetricOutput> {
let ResolvedQuery {
vecs, version, total, start, end, ..
} = resolved;
let count = end.saturating_sub(start);
let mut buf = Vec::with_capacity(count * 12 + 2);
vecs[0].write_json(Some(start), Some(end), &mut buf)?;
Ok(MetricOutput {
output: Output::Json(buf),
version,
total,
start,
end,
})
}
pub fn metric_to_index_to_vec(&self) -> &BTreeMap<&str, IndexToVec<'_>> {
&self.vecs().metric_to_index_to_vec
}
@@ -242,10 +284,76 @@ impl Query {
self.vecs().index_to_ids(paginated_index)
}
pub fn metric_info(&self, metric: &Metric) -> Option<MetricInfo> {
let index_to_vec = self.vecs().metric_to_index_to_vec.get(metric.replace("-", "_").as_str())?;
let value_type = index_to_vec.values().next()?.value_type_to_string();
let indexes = index_to_vec.keys().copied().collect();
Some(MetricInfo {
indexes,
value_type,
})
}
pub fn metric_to_indexes(&self, metric: Metric) -> Option<&Vec<Index>> {
self.vecs().metric_to_indexes(metric)
}
/// Resolve a RangeIndex to an i64 offset for the given index type.
fn range_index_to_i64(&self, ri: RangeIndex, index: Index) -> Result<i64> {
match ri {
RangeIndex::Int(i) => Ok(i),
RangeIndex::Date(date) => self.date_to_i64(date, index),
RangeIndex::Timestamp(ts) => self.timestamp_to_i64(ts, index),
}
}
fn date_to_i64(&self, date: Date, index: Index) -> Result<i64> {
// Direct date-based index conversion (day1, week1, month1, etc.)
if let Some(idx) = index.date_to_index(date) {
return Ok(idx as i64);
}
// Fall through to timestamp-based resolution (height, epoch, halving)
self.timestamp_to_i64(Timestamp::from(date), index)
}
fn timestamp_to_i64(&self, ts: Timestamp, index: Index) -> Result<i64> {
// Direct timestamp-based index conversion (minute10, hour1, etc.)
if let Some(idx) = index.timestamp_to_index(ts) {
return Ok(idx as i64);
}
// Height-based indexes: find block height, then convert
let height = Height::from(self.height_for_timestamp(ts));
match index {
Index::Height => Ok(usize::from(height) as i64),
Index::Epoch => Ok(usize::from(Epoch::from(height)) as i64),
Index::Halving => Ok(usize::from(Halving::from(height)) as i64),
_ => Err(Error::Parse(format!(
"date/timestamp ranges not supported for index '{index}'"
))),
}
}
/// Find the first block height at or after a given timestamp.
/// O(log n) binary search. Lazily rebuilt as new blocks arrive.
fn height_for_timestamp(&self, ts: Timestamp) -> usize {
let current_height: usize = self.height().into();
// Fast path: read lock, ceil is &self
{
let map = HEIGHT_BY_MONOTONIC_TIMESTAMP.read();
if map.len() > current_height {
return map.ceil(ts).map(usize::from).unwrap_or(current_height);
}
}
// Slow path: rebuild from computer's precomputed monotonic timestamps
let mut map = HEIGHT_BY_MONOTONIC_TIMESTAMP.write();
if map.len() <= current_height {
*map = RangeMap::from(self.computer().blocks.time.timestamp_monotonic.collect());
}
map.ceil(ts).map(usize::from).unwrap_or(current_height)
}
/// Deprecated - format a resolved query as legacy output (expensive).
pub fn format_legacy(&self, resolved: ResolvedQuery) -> Result<MetricOutputLegacy> {
let ResolvedQuery {

View File

@@ -8,3 +8,4 @@ mod price;
mod transaction;
pub use block::BLOCK_TXS_PAGE_SIZE;
pub use metrics::ResolvedQuery;

View File

@@ -19,7 +19,7 @@ mod r#impl;
#[cfg(feature = "tokio")]
pub use r#async::*;
pub use r#impl::BLOCK_TXS_PAGE_SIZE;
pub use r#impl::{BLOCK_TXS_PAGE_SIZE, ResolvedQuery};
pub use vecs::Vecs;
#[derive(Clone)]

View File

@@ -142,7 +142,7 @@ impl<'a> Vecs<'a> {
self.counts_by_db
.entry(db.to_string())
.or_default()
.add_endpoint(is_lazy);
.add_endpoint(name, is_lazy);
}
pub fn metrics(&'static self, pagination: Pagination) -> PaginatedMetrics {

View File

@@ -7,7 +7,9 @@ use axum::{
http::{HeaderMap, StatusCode, Uri},
response::{IntoResponse, Response},
};
use brk_types::{Format, MetricSelection, Output};
use brk_error::Result as BrkResult;
use brk_query::{Query as BrkQuery, ResolvedQuery};
use brk_types::{Format, MetricOutput, MetricSelection, Output};
use crate::{
Result,
@@ -20,9 +22,30 @@ use super::AppState;
pub async fn handler(
uri: Uri,
headers: HeaderMap,
Extension(addr): Extension<SocketAddr>,
addr: Extension<SocketAddr>,
Query(params): Query<MetricSelection>,
State(state): State<AppState>,
state: State<AppState>,
) -> Result<Response> {
format_and_respond(uri, headers, addr, params, state, |q, r| q.format(r)).await
}
pub async fn raw_handler(
uri: Uri,
headers: HeaderMap,
addr: Extension<SocketAddr>,
Query(params): Query<MetricSelection>,
state: State<AppState>,
) -> Result<Response> {
format_and_respond(uri, headers, addr, params, state, |q, r| q.format_raw(r)).await
}
async fn format_and_respond(
uri: Uri,
headers: HeaderMap,
Extension(addr): Extension<SocketAddr>,
params: MetricSelection,
state: State<AppState>,
formatter: fn(&BrkQuery, ResolvedQuery) -> BrkResult<MetricOutput>,
) -> Result<Response> {
// Phase 1: Search and resolve metadata (cheap)
let resolved = state
@@ -51,7 +74,7 @@ pub async fn handler(
.get_or_insert(&cache_key, async move {
query
.run(move |q| {
let out = q.format(resolved)?;
let out = formatter(q, resolved)?;
let raw = match out.output {
Output::CSV(s) => Bytes::from(s),
Output::Json(v) => Bytes::from(v),

View File

@@ -10,8 +10,9 @@ use axum::{
use brk_traversable::TreeNode;
use brk_types::{
CostBasisCohortParam, CostBasisFormatted, CostBasisParams, CostBasisQuery, DataRangeFormat,
Date, Index, IndexInfo, LimitParam, Metric, MetricCount, MetricData, MetricParam,
Date, Index, IndexInfo, Metric, MetricCount, MetricData, MetricInfo, MetricParam,
MetricSelection, MetricSelectionLegacy, MetricWithIndex, Metrics, PaginatedMetrics, Pagination,
SearchQuery,
};
use crate::{CacheStrategy, Error, extended::TransformResponseExtended};
@@ -124,16 +125,15 @@ impl ApiMetricsRoutes for ApiRouter<AppState> {
),
)
.api_route(
"/api/metrics/search/{metric}",
"/api/metrics/search",
get_with(
async |
uri: Uri,
headers: HeaderMap,
State(state): State<AppState>,
Path(path): Path<MetricParam>,
Query(query): Query<LimitParam>
Query(query): Query<SearchQuery>
| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| Ok(q.match_metric(&path.metric, query.limit))).await
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| Ok(q.search_metrics(&query))).await
},
|op| op
.id("search_metrics")
@@ -155,26 +155,80 @@ impl ApiMetricsRoutes for ApiRouter<AppState> {
Path(path): Path<MetricParam>
| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| {
if let Some(indexes) = q.metric_to_indexes(path.metric.clone()) {
return Ok(indexes.clone())
}
Err(q.metric_not_found_error(&path.metric))
q.metric_info(&path.metric).ok_or_else(|| q.metric_not_found_error(&path.metric))
}).await
},
|op| op
.id("get_metric_info")
.metrics_tag()
.summary("Get supported indexes for a metric")
.summary("Get metric info")
.description(
"Returns the list of indexes supported by the specified metric. \
For example, `realized_price` might be available on day1, week1, and month1."
"Returns the supported indexes and value type for the specified metric."
)
.ok_response::<Vec<Index>>()
.ok_response::<MetricInfo>()
.not_modified()
.not_found()
.server_error(),
),
)
.api_route(
"/api/metric/{metric}/{index}/latest",
get_with(
async |uri: Uri,
headers: HeaderMap,
State(state): State<AppState>,
Path(path): Path<MetricWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
q.latest(&path.metric, path.index)
})
.await
},
|op| op
.id("get_metric_latest")
.metrics_tag()
.summary("Get latest metric value")
.description(
"Returns the single most recent value for a metric, unwrapped (not inside a MetricData object)."
)
.ok_response::<serde_json::Value>()
.not_found(),
),
)
.api_route(
"/api/metric/{metric}/{index}/data",
get_with(
async |uri: Uri,
headers: HeaderMap,
addr: Extension<SocketAddr>,
state: State<AppState>,
Path(path): Path<MetricWithIndex>,
Query(range): Query<DataRangeFormat>|
-> Response {
data::raw_handler(
uri,
headers,
addr,
Query(MetricSelection::from((path.index, path.metric, range))),
state,
)
.await
.into_response()
},
|op| op
.id("get_metric_data")
.metrics_tag()
.summary("Get raw metric data")
.description(
"Returns just the data array without the MetricData wrapper. \
Supports the same range and format parameters as the standard endpoint."
)
.ok_response::<Vec<serde_json::Value>>()
.csv_response()
.not_modified()
.not_found(),
),
)
.api_route(
"/api/metric/{metric}/{index}",
get_with(

View File

@@ -35,8 +35,9 @@ pub fn create_openapi() -> OpenApi {
```bash
curl -s https://bitview.space/api/block-height/0
curl -s https://bitview.space/api/metrics/search/price
curl -s https://bitview.space/api/metric/price/day1
curl -s https://bitview.space/api/metrics/search?q=price
curl -s https://bitview.space/api/metric/price/day
curl -s https://bitview.space/api/metric/price/day/latest
```
### Errors

View File

@@ -57,6 +57,7 @@ fn error_status(e: &BrkError) -> StatusCode {
BrkError::UnknownAddress
| BrkError::UnknownTxid
| BrkError::NotFound(_)
| BrkError::NoData
| BrkError::MetricNotFound(_) => StatusCode::NOT_FOUND,
BrkError::AuthFailed => StatusCode::FORBIDDEN,
@@ -79,6 +80,7 @@ fn error_code(e: &BrkError) -> &'static str {
BrkError::UnknownAddress => "unknown_address",
BrkError::UnknownTxid => "unknown_txid",
BrkError::NotFound(_) => "not_found",
BrkError::NoData => "no_data",
BrkError::MetricNotFound(_) => "metric_not_found",
BrkError::MempoolNotAvailable => "mempool_not_available",
BrkError::AuthFailed => "auth_failed",

View File

@@ -202,7 +202,7 @@ impl PrintableIndex for Day1 {
}
fn to_possible_strings() -> &'static [&'static str] {
&["1d", "d", "date", "daily", "day1", "dateindex"]
&["1d", "d", "day", "date", "daily", "day1", "dateindex"]
}
}

View File

@@ -120,14 +120,14 @@ impl From<usize> for Dollars {
impl Add for Dollars {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self::from(CentsSigned::from(self) + CentsSigned::from(rhs))
Self(self.0 + rhs.0)
}
}
impl Sub for Dollars {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
Self::from(CentsSigned::from(self) - CentsSigned::from(rhs))
Self(self.0 - rhs.0)
}
}
@@ -346,13 +346,13 @@ impl From<Dollars> for u128 {
impl AddAssign for Dollars {
fn add_assign(&mut self, rhs: Self) {
*self = Dollars::from(CentsSigned::from(*self) + CentsSigned::from(rhs));
self.0 += rhs.0;
}
}
impl SubAssign for Dollars {
fn sub_assign(&mut self, rhs: Self) {
*self = Dollars::from(CentsSigned::from(*self) - CentsSigned::from(rhs));
self.0 -= rhs.0;
}
}

View File

@@ -67,7 +67,7 @@ impl PrintableIndex for Hour1 {
}
fn to_possible_strings() -> &'static [&'static str] {
&["1h", "hour1"]
&["1h", "h", "hour", "hourly", "hour1"]
}
}

View File

@@ -253,7 +253,7 @@ impl Index {
Self::Day3 => return Some(usize::from(Day3::from_timestamp(ts))),
_ => return self.date_to_index(Date::from(ts)),
};
Some(((*ts - INDEX_EPOCH) / interval) as usize)
Some((*ts).saturating_sub(INDEX_EPOCH) as usize / interval as usize)
}
/// Convert an index value to a date for day-precision or coarser indexes.
@@ -299,12 +299,7 @@ impl<'de> Deserialize<'de> for Index {
D: serde::Deserializer<'de>,
{
let str = String::deserialize(deserializer)?;
if let Ok(index) = Index::try_from(str.as_str()) {
// dbg!(index);
Ok(index)
} else {
Err(serde::de::Error::custom("Bad index"))
}
Index::try_from(str.as_str()).map_err(|e| serde::de::Error::custom(e))
}
}

View File

@@ -92,6 +92,7 @@ mod mempoolinfo;
mod metric;
mod metriccount;
mod metricdata;
mod metricinfo;
mod metricoutput;
mod metricparam;
mod metrics;
@@ -141,12 +142,14 @@ mod poolslugparam;
mod poolssummary;
mod poolstats;
mod port;
mod range_map;
mod rangeindex;
mod rawlocktime;
mod recommendedfees;
mod rewardstats;
mod sats;
mod sats_signed;
mod searchquery;
mod satsfract;
mod stored_bool;
mod stored_f32;
@@ -285,6 +288,7 @@ pub use mempoolinfo::*;
pub use metric::*;
pub use metriccount::*;
pub use metricdata::*;
pub use metricinfo::*;
pub use metricoutput::*;
pub use metricparam::*;
pub use metrics::*;
@@ -334,6 +338,7 @@ pub use poolslugparam::*;
pub use poolssummary::*;
pub use poolstats::*;
pub use port::*;
pub use range_map::*;
pub use rangeindex::*;
pub use rawlocktime::*;
pub use recommendedfees::*;
@@ -341,6 +346,7 @@ pub use rewardstats::*;
pub use sats::*;
pub use sats_signed::*;
pub use satsfract::*;
pub use searchquery::*;
pub use stored_bool::*;
pub use stored_f32::*;
pub use stored_f64::*;

View File

@@ -1,5 +1,6 @@
use std::collections::BTreeMap;
use rustc_hash::FxHashSet;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -18,16 +19,21 @@ pub struct MetricCount {
/// Number of eager (stored on disk) metric-index combinations
#[schemars(example = 16000)]
pub stored_endpoints: usize,
#[serde(skip)]
seen: FxHashSet<String>,
}
impl MetricCount {
pub fn add_endpoint(&mut self, is_lazy: bool) {
pub fn add_endpoint(&mut self, name: &str, is_lazy: bool) {
self.total_endpoints += 1;
if is_lazy {
self.lazy_endpoints += 1;
} else {
self.stored_endpoints += 1;
}
if self.seen.insert(name.to_string()) {
self.distinct_metrics += 1;
}
}
}

View File

@@ -17,6 +17,9 @@ pub struct MetricData<T = Value> {
pub version: Version,
/// The index type used for this query
pub index: Index,
/// Value type (e.g. "f32", "u64", "Sats")
#[serde(rename = "type", default)]
pub value_type: String,
/// Total number of data points in the metric
pub total: usize,
/// Start index (inclusive) of the returned range
@@ -48,6 +51,8 @@ impl MetricData {
buf.extend_from_slice(itoa_buf.format(u32::from(vec.version())).as_bytes());
buf.extend_from_slice(b",\"index\":\"");
buf.extend_from_slice(index.name().as_bytes());
buf.extend_from_slice(b"\",\"type\":\"");
buf.extend_from_slice(vec.value_type_to_string().as_bytes());
buf.extend_from_slice(b"\",\"total\":");
buf.extend_from_slice(itoa_buf.format(total).as_bytes());
buf.extend_from_slice(b",\"start\":");
@@ -186,373 +191,3 @@ impl<'de, T: DeserializeOwned> Deserialize<'de> for DateMetricData<T> {
})
}
}
#[cfg(test)]
mod tests {
    //! Unit tests covering `MetricData` iteration (`indexes`, `iter`),
    //! date/timestamp projection (`dates`, `timestamps`), the `DateMetricData`
    //! wrapper, and `Index::timestamp_to_index` conversions.
    use super::*;

    /// Fixture: a daily-indexed metric whose window starts at index 0.
    /// (Day1 index 0 corresponds to Jan 1, 2009 per the assertions below.)
    fn date_based_metric() -> MetricData<i32> {
        MetricData {
            version: Version::ONE,
            index: Index::Day1,
            total: 100,
            start: 0,
            end: 5,
            stamp: "2024-01-01T00:00:00Z".to_string(),
            data: vec![100, 200, 300, 400, 500],
        }
    }

    /// Fixture: a height-indexed (non-date) metric starting at block 800000.
    fn height_based_metric() -> MetricData<f64> {
        MetricData {
            version: Version::ONE,
            index: Index::Height,
            total: 1000,
            start: 800000,
            end: 800005,
            stamp: "2024-01-01T00:00:00Z".to_string(),
            data: vec![1.5, 2.5, 3.5, 4.5, 5.5],
        }
    }

    #[test]
    fn test_indexes_returns_range() {
        let metric = date_based_metric();
        let indexes: Vec<_> = metric.indexes().collect();
        assert_eq!(indexes, vec![0, 1, 2, 3, 4]);
    }

    #[test]
    fn test_indexes_with_offset() {
        // `indexes()` must be offset by `start`, not begin at 0.
        let metric = height_based_metric();
        let indexes: Vec<_> = metric.indexes().collect();
        assert_eq!(indexes, vec![800000, 800001, 800002, 800003, 800004]);
    }

    #[test]
    fn test_is_date_based_true() {
        let metric = date_based_metric();
        assert!(metric.is_date_based());
    }

    #[test]
    fn test_is_date_based_false() {
        let metric = height_based_metric();
        assert!(!metric.is_date_based());
    }

    #[test]
    fn test_dates_for_day1() {
        let metric = date_based_metric();
        let dates: Vec<_> = metric.dates().unwrap().collect();
        assert_eq!(dates.len(), 5);
        // Day1 0 = Jan 1, 2009
        assert_eq!(dates[0].year(), 2009);
        assert_eq!(dates[0].month(), 1);
        assert_eq!(dates[0].day(), 1);
        // Day1 1 = Jan 2, 2009
        assert_eq!(dates[1].year(), 2009);
        assert_eq!(dates[1].month(), 1);
        assert_eq!(dates[1].day(), 2);
    }

    #[test]
    fn test_iter() {
        // `iter()` yields (index, &value) pairs.
        let metric = date_based_metric();
        let pairs: Vec<_> = metric.iter().collect();
        assert_eq!(pairs.len(), 5);
        assert_eq!(pairs[0], (0, &100));
        assert_eq!(pairs[1], (1, &200));
        assert_eq!(pairs[4], (4, &500));
    }

    #[test]
    fn test_iter_with_offset() {
        let metric = height_based_metric();
        let pairs: Vec<_> = metric.iter().collect();
        assert_eq!(pairs.len(), 5);
        assert_eq!(pairs[0], (800000, &1.5));
        assert_eq!(pairs[4], (800004, &5.5));
    }

    #[test]
    fn test_iter_dates() {
        let metric = date_based_metric();
        let pairs: Vec<_> = metric.iter_dates().unwrap().collect();
        assert_eq!(pairs.len(), 5);
        // First pair: (Jan 1 2009, 100)
        assert_eq!(pairs[0].0.year(), 2009);
        assert_eq!(pairs[0].0.month(), 1);
        assert_eq!(pairs[0].0.day(), 1);
        assert_eq!(pairs[0].1, &100);
        // Second pair: (Jan 2 2009, 200)
        assert_eq!(pairs[1].0.day(), 2);
        assert_eq!(pairs[1].1, &200);
    }

    #[test]
    fn test_dates_returns_none_for_non_date_index() {
        let metric = height_based_metric();
        assert!(metric.dates().is_none());
    }

    #[test]
    fn test_iter_dates_returns_none_for_non_date_index() {
        let metric = height_based_metric();
        assert!(metric.iter_dates().is_none());
    }

    #[test]
    fn test_date_metric_data_try_new_ok() {
        let metric = date_based_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        assert_eq!(date_metric.data.len(), 5);
        let dates: Vec<_> = date_metric.dates().unwrap().collect();
        assert_eq!(dates.len(), 5);
        assert_eq!(dates[0].year(), 2009);
    }

    #[test]
    fn test_date_metric_data_try_new_err() {
        // Non-date indexes must be rejected by the wrapper constructor.
        let metric = height_based_metric();
        assert!(DateMetricData::try_new(metric).is_err());
    }

    #[test]
    fn test_date_metric_data_iter_dates() {
        let metric = date_based_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        let pairs: Vec<_> = date_metric.iter_dates().unwrap().collect();
        assert_eq!(pairs.len(), 5);
        assert_eq!(pairs[0].0.day(), 1);
        assert_eq!(pairs[0].1, &100);
    }

    #[test]
    fn test_date_metric_data_deref() {
        let metric = date_based_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        // Access MetricData methods via Deref
        assert!(date_metric.is_date_based());
        assert_eq!(date_metric.indexes().count(), 5);
    }

    // Sub-daily tests

    /// Fixture: an hourly (sub-daily) metric — date-based, but only exposes
    /// timestamps, not calendar dates.
    fn sub_daily_metric() -> MetricData<f64> {
        MetricData {
            version: Version::ONE,
            index: Index::Hour1,
            total: 200000,
            start: 0,
            end: 3,
            stamp: "2024-01-01T00:00:00Z".to_string(),
            data: vec![10.0, 20.0, 30.0],
        }
    }

    #[test]
    fn test_sub_daily_is_date_based() {
        let metric = sub_daily_metric();
        assert!(metric.is_date_based());
    }

    #[test]
    fn test_sub_daily_dates_returns_none() {
        // Calendar dates are undefined for sub-daily granularity.
        let metric = sub_daily_metric();
        assert!(metric.dates().is_none());
    }

    #[test]
    fn test_sub_daily_timestamps_returns_some() {
        let metric = sub_daily_metric();
        let ts: Vec<_> = metric.timestamps().unwrap().collect();
        assert_eq!(ts.len(), 3);
        // Hour1 index 0 = INDEX_EPOCH (2009-01-01 00:00:00 UTC)
        assert_eq!(*ts[0], 1230768000);
        // Hour1 index 1 = INDEX_EPOCH + 3600
        assert_eq!(*ts[1], 1230768000 + 3600);
    }

    #[test]
    fn test_sub_daily_iter_timestamps() {
        let metric = sub_daily_metric();
        let pairs: Vec<_> = metric.iter_timestamps().unwrap().collect();
        assert_eq!(pairs.len(), 3);
        assert_eq!(*pairs[0].0, 1230768000);
        assert_eq!(pairs[0].1, &10.0);
    }

    #[test]
    fn test_date_metric_data_sub_daily_timestamps() {
        let metric = sub_daily_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        // dates() returns None for sub-daily
        assert!(date_metric.dates().is_none());
        // timestamps() works for all date-based
        let ts: Vec<_> = date_metric.timestamps().collect();
        assert_eq!(ts.len(), 3);
    }

    #[test]
    fn test_date_metric_data_iter_timestamps() {
        let metric = sub_daily_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        let pairs: Vec<_> = date_metric.iter_timestamps().collect();
        assert_eq!(pairs.len(), 3);
        assert_eq!(pairs[2].1, &30.0);
    }

    #[test]
    fn test_day1_timestamps_also_works() {
        // timestamps() works for daily indexes too
        let metric = date_based_metric();
        let ts: Vec<_> = metric.timestamps().unwrap().collect();
        assert_eq!(ts.len(), 5);
    }

    // Empty data

    /// Fixture: an empty window (start == end, no data points).
    fn empty_metric() -> MetricData<i32> {
        MetricData {
            version: Version::ONE,
            index: Index::Day1,
            total: 100,
            start: 5,
            end: 5,
            stamp: "2024-01-01T00:00:00Z".to_string(),
            data: vec![],
        }
    }

    #[test]
    fn test_empty_indexes() {
        let metric = empty_metric();
        assert_eq!(metric.indexes().count(), 0);
    }

    #[test]
    fn test_empty_iter() {
        let metric = empty_metric();
        assert_eq!(metric.iter().count(), 0);
    }

    #[test]
    fn test_empty_dates() {
        let metric = empty_metric();
        assert_eq!(metric.dates().unwrap().count(), 0);
    }

    #[test]
    fn test_empty_timestamps() {
        let metric = empty_metric();
        assert_eq!(metric.timestamps().unwrap().count(), 0);
    }

    // Non-date timestamps/iter_timestamps
    #[test]
    fn test_timestamps_returns_none_for_non_date() {
        let metric = height_based_metric();
        assert!(metric.timestamps().is_none());
    }

    #[test]
    fn test_iter_timestamps_returns_none_for_non_date() {
        let metric = height_based_metric();
        assert!(metric.iter_timestamps().is_none());
    }

    // DateMetricData sub-daily iter_dates returns None
    #[test]
    fn test_date_metric_data_sub_daily_iter_dates_returns_none() {
        let metric = sub_daily_metric();
        let date_metric = DateMetricData::try_new(metric).unwrap();
        assert!(date_metric.iter_dates().is_none());
    }

    // Month1 dates

    /// Fixture: a monthly-indexed metric starting at index 0 (Jan 2009).
    fn month1_metric() -> MetricData<i32> {
        MetricData {
            version: Version::ONE,
            index: Index::Month1,
            total: 200,
            start: 0,
            end: 3,
            stamp: "2024-01-01T00:00:00Z".to_string(),
            data: vec![1000, 2000, 3000],
        }
    }

    #[test]
    fn test_dates_for_month1() {
        let metric = month1_metric();
        let dates: Vec<_> = metric.dates().unwrap().collect();
        assert_eq!(dates.len(), 3);
        assert_eq!(dates[0].year(), 2009);
        assert_eq!(dates[0].month(), 1);
        assert_eq!(dates[0].day(), 1);
        assert_eq!(dates[1].month(), 2);
        assert_eq!(dates[2].month(), 3);
    }

    #[test]
    fn test_timestamps_for_month1() {
        let metric = month1_metric();
        let ts: Vec<_> = metric.timestamps().unwrap().collect();
        assert_eq!(ts.len(), 3);
        // Each should be a valid timestamp
        assert!(*ts[0] > 0);
        assert!(*ts[1] > *ts[0]);
        assert!(*ts[2] > *ts[1]);
    }

    // Deserialize roundtrip
    #[test]
    fn test_date_metric_data_deserialize_valid() {
        let json = r#"{"version":1,"index":"day1","total":100,"start":0,"end":2,"stamp":"2024-01-01T00:00:00Z","data":[1,2]}"#;
        let result: Result<DateMetricData<i32>, _> = serde_json::from_str(json);
        assert!(result.is_ok());
        let dm = result.unwrap();
        assert_eq!(dm.data.len(), 2);
    }

    #[test]
    fn test_date_metric_data_deserialize_rejects_non_date() {
        let json = r#"{"version":1,"index":"height","total":100,"start":0,"end":2,"stamp":"2024-01-01T00:00:00Z","data":[1,2]}"#;
        let result: Result<DateMetricData<i32>, _> = serde_json::from_str(json);
        assert!(result.is_err());
        let err = result.unwrap_err().to_string();
        assert!(
            err.contains("date-based"),
            "error should mention date-based: {}",
            err
        );
    }

    // timestamp_to_index tests
    #[test]
    fn test_timestamp_to_index_hour1() {
        // INDEX_EPOCH + 2 hours
        let ts = Timestamp::new(1230768000 + 7200);
        assert_eq!(Index::Hour1.timestamp_to_index(ts), Some(2));
    }

    #[test]
    fn test_timestamp_to_index_non_date_returns_none() {
        let ts = Timestamp::new(1230768000);
        assert!(Index::Height.timestamp_to_index(ts).is_none());
    }

    #[test]
    fn test_timestamp_to_index_day1_via_date_fallback() {
        // Day1 goes through date_to_index fallback
        // 2009-01-09 = Day1 index 8
        let ts = Timestamp::from(Date::new(2009, 1, 9));
        assert_eq!(Index::Day1.timestamp_to_index(ts), Some(8));
    }
}

View File

@@ -0,0 +1,14 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::Index;
/// Metadata about a metric: the indexes it can be queried under and the
/// type of its values.
#[derive(Debug, Serialize, JsonSchema)]
pub struct MetricInfo {
    /// Available indexes
    pub indexes: Vec<Index>,
    /// Value type (e.g. "f32", "u64", "Sats").
    /// Serialized under the key "type" (renamed because `type` is a Rust keyword).
    #[serde(rename = "type")]
    pub value_type: &'static str,
}

View File

@@ -4,22 +4,41 @@ use std::marker::PhantomData;
const CACHE_SIZE: usize = 128;
const CACHE_MASK: usize = CACHE_SIZE - 1;
/// Cache entry: (range_low, range_high, value, occupied).
type CacheEntry<I, V> = (I, I, V, bool);
/// Maps ranges of indices to values for efficient reverse lookups.
///
/// Instead of storing a value for every index, stores first_index values
/// in a sorted Vec and uses binary search to find the value for any index.
/// The value is derived from the position in the Vec.
/// Stores first_index values in a sorted Vec and uses binary search
/// to find the value for any index. The value is derived from the position.
///
/// Includes a direct-mapped cache for O(1) lookups when there's locality.
#[derive(Debug, Clone)]
/// Includes a direct-mapped cache for O(1) floor lookups when there's locality.
pub struct RangeMap<I, V> {
/// Sorted vec of first_index values. Position in vec = value.
first_indexes: Vec<I>,
/// Direct-mapped cache: (range_low, range_high, value, occupied). Inline for zero indirection.
cache: [(I, I, V, bool); CACHE_SIZE],
cache: [CacheEntry<I, V>; CACHE_SIZE],
_phantom: PhantomData<V>,
}
impl<I: Default + Copy, V: Default + Copy> Clone for RangeMap<I, V> {
    /// Clones the sorted key vector but starts the clone with an empty
    /// (all-unoccupied) lookup cache.
    ///
    /// NOTE(review): the cache is deliberately not copied — presumably because
    /// copying the inline CACHE_SIZE array is wasteful and entries are cheap
    /// to rebuild on the next `get` — TODO confirm intent.
    fn clone(&self) -> Self {
        Self {
            first_indexes: self.first_indexes.clone(),
            cache: [(I::default(), I::default(), V::default(), false); CACHE_SIZE],
            _phantom: PhantomData,
        }
    }
}
impl<I: Default + Copy, V: Default + Copy> From<Vec<I>> for RangeMap<I, V> {
    /// Builds a `RangeMap` directly from a vector of first-index keys with an
    /// empty cache.
    ///
    /// NOTE(review): no ordering check is performed here, while `push` has a
    /// debug assertion that keys are non-decreasing — callers are presumably
    /// expected to pass an already-sorted vec; confirm at call sites.
    fn from(first_indexes: Vec<I>) -> Self {
        Self {
            first_indexes,
            cache: [(I::default(), I::default(), V::default(), false); CACHE_SIZE],
            _phantom: PhantomData,
        }
    }
}
impl<I: Default + Copy, V: Default + Copy> Default for RangeMap<I, V> {
fn default() -> Self {
Self {
@@ -32,20 +51,25 @@ impl<I: Default + Copy, V: Default + Copy> Default for RangeMap<I, V> {
impl<I: Ord + Copy + Default + Into<usize>, V: From<usize> + Copy + Default> RangeMap<I, V> {
/// Number of ranges stored.
pub(crate) fn len(&self) -> usize {
pub fn len(&self) -> usize {
self.first_indexes.len()
}
/// Truncate to `new_len` ranges and clear the cache.
pub(crate) fn truncate(&mut self, new_len: usize) {
pub fn truncate(&mut self, new_len: usize) {
self.first_indexes.truncate(new_len);
self.clear_cache();
}
/// Reserve capacity for at least `additional` more first-index entries.
/// Only affects the backing vec; the lookup cache is fixed-size.
pub fn reserve(&mut self, additional: usize) {
    self.first_indexes.reserve(additional);
}
/// Push a new first_index. Value is implicitly the current length.
/// Must be called in order (first_index must be >= all previous).
#[inline]
pub(crate) fn push(&mut self, first_index: I) {
pub fn push(&mut self, first_index: I) {
debug_assert!(
self.first_indexes
.last()
@@ -55,40 +79,52 @@ impl<I: Ord + Copy + Default + Into<usize>, V: From<usize> + Copy + Default> Ran
self.first_indexes.push(first_index);
}
/// Look up value for an index, checking cache first.
/// Returns the value (position) of the largest first_index <= given index.
/// Returns the last pushed first_index, if any.
#[inline]
pub(crate) fn get(&mut self, index: I) -> Option<V> {
pub fn last_key(&self) -> Option<I> {
self.first_indexes.last().copied()
}
/// Floor: returns the value (position) of the largest first_index <= given index.
#[inline]
pub fn get(&mut self, index: I) -> Option<V> {
if self.first_indexes.is_empty() {
return None;
}
// Direct-mapped cache lookup: O(1), no aging
let slot = Self::cache_slot(&index);
let entry = &self.cache[slot];
if entry.3 && index >= entry.0 && index < entry.1 {
return Some(entry.2);
}
// Cache miss - binary search
let pos = self.first_indexes.partition_point(|&first| first <= index);
if pos > 0 {
let value = V::from(pos - 1);
let low = self.first_indexes[pos - 1];
let is_last = pos == self.first_indexes.len();
// Cache non-last ranges (last range has unbounded high)
if !is_last {
let high = self.first_indexes[pos];
self.cache[slot] = (low, high, value, true);
if pos < self.first_indexes.len() {
self.cache[slot] = (self.first_indexes[pos - 1], self.first_indexes[pos], value, true);
}
Some(value)
} else {
None
}
}
/// Ceil lookup: returns the value (position) of the smallest stored
/// first_index that is >= the given index, or `None` when every stored
/// key is strictly below `index` (or the map is empty).
#[inline]
pub fn ceil(&self, index: I) -> Option<V> {
    if self.first_indexes.is_empty() {
        return None;
    }
    // First position whose key is not strictly below `index`.
    let pos = self.first_indexes.partition_point(|&first| first < index);
    (pos < self.first_indexes.len()).then(|| V::from(pos))
}
#[inline]
fn cache_slot(index: &I) -> usize {
let v: usize = (*index).into();

View File

@@ -0,0 +1,13 @@
use schemars::JsonSchema;
use serde::Deserialize;
use crate::{Limit, Metric};
/// Query parameters for the metric search endpoint.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct SearchQuery {
    /// Search query string
    pub q: Metric,
    /// Maximum number of results (falls back to `Limit`'s default when absent)
    #[serde(default)]
    pub limit: Limit,
}

View File

@@ -124,6 +124,13 @@ impl From<usize> for Timestamp {
}
}
impl From<Timestamp> for usize {
    /// Extracts the raw inner value as a `usize`.
    ///
    /// NOTE(review): `as usize` truncates silently if the inner value can
    /// exceed `usize::MAX` on 32-bit targets — the inner type is not visible
    /// here; confirm it fits (lossless on 64-bit targets if inner is <= 64 bits).
    #[inline]
    fn from(value: Timestamp) -> Self {
        value.0 as usize
    }
}
impl From<Date> for Timestamp {
#[inline]
fn from(value: Date) -> Self {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -16,7 +16,11 @@ Free, no auth required. JSON and CSV output. Mempool.space compatible for block/
Search for metrics by keyword:
GET /api/metrics/search/{query}
GET /api/metrics/search?q={query}
Get metric info (available indexes, value type):
GET /api/metric/{metric}
Browse all available metrics:
@@ -35,17 +39,26 @@ Browse the full metric catalog as a tree:
Get a metric by name and index:
GET /api/metric/{metric}/{index}
GET /api/metric/{metric}/{index}?start=2025-01-01&end=2025-06-01
GET /api/metric/{metric}/{index}?start=-30
Get just the data array (no wrapper):
GET /api/metric/{metric}/{index}/data
Get the latest value:
GET /api/metric/{metric}/{index}/latest
Example — last 30 days of Bitcoin closing price:
GET /api/metric/close/1d?start=-30
GET /api/metric/price/day?start=-30
Fetch multiple metrics at once:
GET /api/metrics/bulk?index={index}&metrics={metric1},{metric2}
See the `MetricData` schema and query parameters (`start`, `end`, `limit`) in the [OpenAPI spec](https://bitview.space/api.json).
Range parameters `start` and `end` accept integers, dates (YYYY-MM-DD), or ISO 8601 timestamps. See the [OpenAPI spec](https://bitview.space/api.json) for full details.
## Block Explorer