global: snapshot

This commit is contained in:
nym21
2026-03-04 23:49:28 +01:00
parent ef0b77baa8
commit 6f2a87be4f
17 changed files with 460 additions and 422 deletions

View File

@@ -1,6 +1,6 @@
use brk_cohort::Filter;
use brk_error::Result;
use brk_types::{BasisPoints16, BasisPointsSigned16, Cents, Height, Version};
use brk_types::{BasisPoints16, BasisPoints32, BasisPointsSigned32, Cents, Height, Version};
use schemars::JsonSchema;
use vecdb::{BytesVec, BytesVecValue, Database, ImportableVec};
@@ -87,11 +87,11 @@ impl<'a> ImportConfig<'a> {
)
}
pub(crate) fn import_percent_bps16(
pub(crate) fn import_percent_bps32(
&self,
suffix: &str,
offset: Version,
) -> Result<PercentFromHeight<BasisPointsSigned16>> {
) -> Result<PercentFromHeight<BasisPointsSigned32>> {
PercentFromHeight::forced_import(
self.db,
&self.name(suffix),
@@ -199,11 +199,24 @@ impl<'a> ImportConfig<'a> {
)
}
pub(crate) fn import_percent_rolling_bp16(
pub(crate) fn import_percent_bp32(
&self,
suffix: &str,
offset: Version,
) -> Result<PercentRollingWindows<BasisPoints16>> {
) -> Result<PercentFromHeight<BasisPoints32>> {
PercentFromHeight::forced_import(
self.db,
&self.name(suffix),
self.version + offset,
self.indexes,
)
}
pub(crate) fn import_percent_rolling_bp32(
&self,
suffix: &str,
offset: Version,
) -> Result<PercentRollingWindows<BasisPoints32>> {
PercentRollingWindows::forced_import(
self.db,
&self.name(suffix),
@@ -225,11 +238,11 @@ impl<'a> ImportConfig<'a> {
)
}
pub(crate) fn import_percent_emas_1w_1m_bp16(
pub(crate) fn import_percent_emas_1w_1m_bp32(
&self,
suffix: &str,
offset: Version,
) -> Result<PercentRollingEmas1w1m<BasisPoints16>> {
) -> Result<PercentRollingEmas1w1m<BasisPoints32>> {
PercentRollingEmas1w1m::forced_import(
self.db,
&self.name(suffix),

View File

@@ -1,7 +1,7 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{
BasisPoints16, BasisPoints32, BasisPointsSigned16, Bitcoin, Cents, CentsSats, CentsSigned,
BasisPoints16, BasisPoints32, BasisPointsSigned32, Bitcoin, Cents, CentsSats, CentsSigned,
CentsSquaredSats, Dollars, Height, Indexes, Sats, StoredF32, StoredF64, Version,
};
use vecdb::{
@@ -16,9 +16,9 @@ use crate::{
CentsPlus, CentsUnsignedToDollars, ComputedFromHeight, ComputedFromHeightCumulative,
ComputedFromHeightRatio, FiatFromHeight, Identity, LazyFromHeight,
NegCentsUnsignedToDollars, PercentFromHeight, PercentRollingEmas1w1m,
PercentRollingWindows, Price, RatioCents64, RatioCentsBp16, RatioCentsSignedCentsBps16,
RatioCentsSignedDollarsBps16, RollingEmas1w1m, RollingEmas2w, RollingWindows,
ValueFromHeightCumulative,
PercentRollingWindows, Price, RatioCents64, RatioCentsBp16, RatioCentsBp32,
RatioCentsSignedCentsBps32, RatioCentsSignedDollarsBps32, RollingEmas1w1m, RollingEmas2w,
RollingWindows, ValueFromHeightCumulative,
},
prices,
};
@@ -55,7 +55,7 @@ pub struct RealizedBase<M: StorageMode = Rw> {
pub realized_profit_rel_to_realized_cap: PercentFromHeight<BasisPoints16, M>,
pub realized_loss_rel_to_realized_cap: PercentFromHeight<BasisPoints16, M>,
pub net_realized_pnl_rel_to_realized_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub net_realized_pnl_rel_to_realized_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub profit_value_created: ComputedFromHeight<Cents, M>,
pub profit_value_destroyed: ComputedFromHeight<Cents, M>,
@@ -75,15 +75,15 @@ pub struct RealizedBase<M: StorageMode = Rw> {
pub sopr_24h_ema: RollingEmas1w1m<StoredF64, M>,
pub gross_pnl_sum: RollingWindows<Cents, M>,
pub sell_side_risk_ratio: PercentRollingWindows<BasisPoints16, M>,
pub sell_side_risk_ratio_24h_ema: PercentRollingEmas1w1m<BasisPoints16, M>,
pub sell_side_risk_ratio: PercentRollingWindows<BasisPoints32, M>,
pub sell_side_risk_ratio_24h_ema: PercentRollingEmas1w1m<BasisPoints32, M>,
pub net_pnl_change_1m: ComputedFromHeight<CentsSigned, M>,
pub net_pnl_change_1m_rel_to_realized_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub net_pnl_change_1m_rel_to_market_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub net_pnl_change_1m_rel_to_realized_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub net_pnl_change_1m_rel_to_market_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub peak_regret: ComputedFromHeightCumulative<Cents, M>,
pub peak_regret_rel_to_realized_cap: PercentFromHeight<BasisPoints16, M>,
pub peak_regret_rel_to_realized_cap: PercentFromHeight<BasisPoints32, M>,
pub sent_in_profit: ValueFromHeightCumulative<M>,
pub sent_in_profit_ema: RollingEmas2w<M>,
@@ -126,7 +126,7 @@ impl RealizedBase {
let realized_loss_rel_to_realized_cap =
cfg.import_percent_bp16("realized_loss_rel_to_realized_cap", v1)?;
let net_realized_pnl_rel_to_realized_cap =
cfg.import_percent_bps16("net_realized_pnl_rel_to_realized_cap", v1)?;
cfg.import_percent_bps32("net_realized_pnl_rel_to_realized_cap", Version::new(2))?;
let realized_price = cfg.import_price("realized_price", v1)?;
let investor_price = cfg.import_price("investor_price", v0)?;
@@ -169,15 +169,15 @@ impl RealizedBase {
let value_destroyed_sum = cfg.import_rolling("value_destroyed", v1)?;
let gross_pnl_sum = cfg.import_rolling("gross_pnl_sum", v1)?;
let sopr = cfg.import_rolling("sopr", v1)?;
let sell_side_risk_ratio = cfg.import_percent_rolling_bp16("sell_side_risk_ratio", v1)?;
let sell_side_risk_ratio = cfg.import_percent_rolling_bp32("sell_side_risk_ratio", Version::new(2))?;
// EMAs
let sopr_24h_ema = cfg.import_emas_1w_1m("sopr_24h", v1)?;
let sell_side_risk_ratio_24h_ema =
cfg.import_percent_emas_1w_1m_bp16("sell_side_risk_ratio_24h", v1)?;
cfg.import_percent_emas_1w_1m_bp32("sell_side_risk_ratio_24h", Version::new(2))?;
let peak_regret_rel_to_realized_cap =
cfg.import_percent_bp16("realized_peak_regret_rel_to_realized_cap", v1)?;
cfg.import_percent_bp32("realized_peak_regret_rel_to_realized_cap", Version::new(2))?;
Ok(Self {
realized_cap_cents,
@@ -220,9 +220,9 @@ impl RealizedBase {
sell_side_risk_ratio_24h_ema,
net_pnl_change_1m: cfg.import_computed("net_pnl_change_1m", Version::new(3))?,
net_pnl_change_1m_rel_to_realized_cap: cfg
.import_percent_bps16("net_pnl_change_1m_rel_to_realized_cap", Version::new(3))?,
.import_percent_bps32("net_pnl_change_1m_rel_to_realized_cap", Version::new(4))?,
net_pnl_change_1m_rel_to_market_cap: cfg
.import_percent_bps16("net_pnl_change_1m_rel_to_market_cap", Version::new(3))?,
.import_percent_bps32("net_pnl_change_1m_rel_to_market_cap", Version::new(4))?,
peak_regret,
peak_regret_rel_to_realized_cap,
sent_in_profit: cfg.import_value_cumulative("sent_in_profit", v0)?,
@@ -585,7 +585,7 @@ impl RealizedBase {
.into_iter()
.zip(self.gross_pnl_sum.as_array())
{
ssrr.compute_binary::<Cents, Cents, RatioCentsBp16>(
ssrr.compute_binary::<Cents, Cents, RatioCentsBp32>(
starting_indexes.height,
&rv.height,
&self.realized_cap_cents.height,
@@ -663,14 +663,14 @@ impl RealizedBase {
exit,
)?;
self.net_realized_pnl_rel_to_realized_cap
.compute_binary::<CentsSigned, Cents, RatioCentsSignedCentsBps16>(
.compute_binary::<CentsSigned, Cents, RatioCentsSignedCentsBps32>(
starting_indexes.height,
&self.net_realized_pnl.height,
&self.realized_cap_cents.height,
exit,
)?;
self.peak_regret_rel_to_realized_cap
.compute_binary::<Cents, Cents, RatioCentsBp16>(
.compute_binary::<Cents, Cents, RatioCentsBp32>(
starting_indexes.height,
&self.peak_regret.height,
&self.realized_cap_cents.height,
@@ -686,7 +686,7 @@ impl RealizedBase {
)?;
self.net_pnl_change_1m_rel_to_realized_cap
.compute_binary::<CentsSigned, Cents, RatioCentsSignedCentsBps16>(
.compute_binary::<CentsSigned, Cents, RatioCentsSignedCentsBps32>(
starting_indexes.height,
&self.net_pnl_change_1m.height,
&self.realized_cap_cents.height,
@@ -694,7 +694,7 @@ impl RealizedBase {
)?;
self.net_pnl_change_1m_rel_to_market_cap
.compute_binary::<CentsSigned, Dollars, RatioCentsSignedDollarsBps16>(
.compute_binary::<CentsSigned, Dollars, RatioCentsSignedDollarsBps32>(
starting_indexes.height,
&self.net_pnl_change_1m.height,
height_to_market_cap,

View File

@@ -1,12 +1,12 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, Cents, Dollars, Height, Indexes, StoredF64, Version};
use brk_types::{BasisPoints32, Cents, Dollars, Height, Indexes, StoredF64, Version};
use vecdb::{Exit, ReadableVec, Rw, StorageMode};
use crate::{
blocks,
internal::{
ComputedFromHeightRatioExtension, PercentFromHeight, RatioCents64, RatioDollarsBp16,
ComputedFromHeightRatioExtension, PercentFromHeight, RatioCents64, RatioDollarsBp32,
RollingWindows,
},
};
@@ -17,7 +17,7 @@ use super::RealizedBase;
#[derive(Traversable)]
pub struct RealizedExtended<M: StorageMode = Rw> {
pub realized_cap_rel_to_own_market_cap: PercentFromHeight<BasisPoints16, M>,
pub realized_cap_rel_to_own_market_cap: PercentFromHeight<BasisPoints32, M>,
pub realized_profit_sum: RollingWindows<Cents, M>,
pub realized_loss_sum: RollingWindows<Cents, M>,
@@ -32,7 +32,7 @@ impl RealizedExtended {
pub(crate) fn forced_import(cfg: &ImportConfig) -> Result<Self> {
Ok(RealizedExtended {
realized_cap_rel_to_own_market_cap: cfg
.import_percent_bp16("realized_cap_rel_to_own_market_cap", Version::ZERO)?,
.import_percent_bp32("realized_cap_rel_to_own_market_cap", Version::ONE)?,
realized_profit_sum: cfg.import_rolling("realized_profit", Version::ONE)?,
realized_loss_sum: cfg.import_rolling("realized_loss", Version::ONE)?,
realized_profit_to_loss_ratio: cfg
@@ -78,7 +78,7 @@ impl RealizedExtended {
// Realized cap relative to own market cap
self.realized_cap_rel_to_own_market_cap
.compute_binary::<Dollars, Dollars, RatioDollarsBp16>(
.compute_binary::<Dollars, Dollars, RatioDollarsBp32>(
starting_indexes.height,
&base.realized_cap.height,
height_to_market_cap,

View File

@@ -1,11 +1,11 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, BasisPointsSigned16, Dollars, Height, Sats, StoredF32, Version};
use brk_types::{BasisPoints16, BasisPointsSigned32, Dollars, Height, Sats, StoredF32, Version};
use vecdb::{Exit, ReadableCloneableVec, ReadableVec, Rw, StorageMode};
use crate::internal::{
Bps16ToFloat, LazyFromHeight, NegRatioDollarsBps16, PercentFromHeight, RatioDollarsBp16,
RatioDollarsBps16, RatioSatsBp16,
Bps32ToFloat, LazyFromHeight, NegRatioDollarsBps32, PercentFromHeight, RatioDollarsBp16,
RatioDollarsBps32, RatioSatsBp16,
};
use crate::distribution::metrics::{ImportConfig, RealizedBase, UnrealizedBase};
@@ -17,9 +17,9 @@ pub struct RelativeBase<M: StorageMode = Rw> {
pub unrealized_profit_rel_to_market_cap: PercentFromHeight<BasisPoints16, M>,
pub unrealized_loss_rel_to_market_cap: PercentFromHeight<BasisPoints16, M>,
pub neg_unrealized_loss_rel_to_market_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub net_unrealized_pnl_rel_to_market_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub nupl: LazyFromHeight<StoredF32, BasisPointsSigned16>,
pub neg_unrealized_loss_rel_to_market_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub net_unrealized_pnl_rel_to_market_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub nupl: LazyFromHeight<StoredF32, BasisPointsSigned32>,
pub invested_capital_in_profit_rel_to_realized_cap: PercentFromHeight<BasisPoints16, M>,
pub invested_capital_in_loss_rel_to_realized_cap: PercentFromHeight<BasisPoints16, M>,
@@ -31,11 +31,11 @@ impl RelativeBase {
let v2 = Version::new(2);
let net_unrealized_pnl_rel_to_market_cap =
cfg.import_percent_bps16("net_unrealized_pnl_rel_to_market_cap", v2)?;
cfg.import_percent_bps32("net_unrealized_pnl_rel_to_market_cap", Version::new(3))?;
let nupl = LazyFromHeight::from_computed::<Bps16ToFloat>(
let nupl = LazyFromHeight::from_computed::<Bps32ToFloat>(
&cfg.name("nupl"),
cfg.version + v2,
cfg.version + Version::new(3),
net_unrealized_pnl_rel_to_market_cap
.bps
.height
@@ -53,7 +53,7 @@ impl RelativeBase {
unrealized_loss_rel_to_market_cap: cfg
.import_percent_bp16("unrealized_loss_rel_to_market_cap", v2)?,
neg_unrealized_loss_rel_to_market_cap: cfg
.import_percent_bps16("neg_unrealized_loss_rel_to_market_cap", v2)?,
.import_percent_bps32("neg_unrealized_loss_rel_to_market_cap", Version::new(3))?,
net_unrealized_pnl_rel_to_market_cap,
nupl,
invested_capital_in_profit_rel_to_realized_cap: cfg.import_percent_bp16(
@@ -105,14 +105,14 @@ impl RelativeBase {
exit,
)?;
self.neg_unrealized_loss_rel_to_market_cap
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps32>(
max_from,
&unrealized.unrealized_loss.usd.height,
market_cap,
exit,
)?;
self.net_unrealized_pnl_rel_to_market_cap
.compute_binary::<Dollars, Dollars, RatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, RatioDollarsBps32>(
max_from,
&unrealized.net_unrealized_pnl.usd.height,
market_cap,

View File

@@ -1,10 +1,10 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, BasisPointsSigned16, Dollars, Height, Version};
use brk_types::{BasisPoints16, BasisPoints32, BasisPointsSigned32, Dollars, Height, Version};
use vecdb::{Exit, ReadableVec, Rw, StorageMode};
use crate::internal::{
NegRatioDollarsBps16, PercentFromHeight, RatioDollarsBp16, RatioDollarsBps16,
NegRatioDollarsBps32, PercentFromHeight, RatioDollarsBp16, RatioDollarsBp32, RatioDollarsBps32,
};
use crate::distribution::metrics::{ImportConfig, UnrealizedBase};
@@ -13,9 +13,9 @@ use crate::distribution::metrics::{ImportConfig, UnrealizedBase};
#[derive(Traversable)]
pub struct RelativeExtendedOwnMarketCap<M: StorageMode = Rw> {
pub unrealized_profit_rel_to_own_market_cap: PercentFromHeight<BasisPoints16, M>,
pub unrealized_loss_rel_to_own_market_cap: PercentFromHeight<BasisPoints16, M>,
pub neg_unrealized_loss_rel_to_own_market_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub net_unrealized_pnl_rel_to_own_market_cap: PercentFromHeight<BasisPointsSigned16, M>,
pub unrealized_loss_rel_to_own_market_cap: PercentFromHeight<BasisPoints32, M>,
pub neg_unrealized_loss_rel_to_own_market_cap: PercentFromHeight<BasisPointsSigned32, M>,
pub net_unrealized_pnl_rel_to_own_market_cap: PercentFromHeight<BasisPointsSigned32, M>,
}
impl RelativeExtendedOwnMarketCap {
@@ -26,11 +26,11 @@ impl RelativeExtendedOwnMarketCap {
unrealized_profit_rel_to_own_market_cap: cfg
.import_percent_bp16("unrealized_profit_rel_to_own_market_cap", v2)?,
unrealized_loss_rel_to_own_market_cap: cfg
.import_percent_bp16("unrealized_loss_rel_to_own_market_cap", v2)?,
.import_percent_bp32("unrealized_loss_rel_to_own_market_cap", Version::new(3))?,
neg_unrealized_loss_rel_to_own_market_cap: cfg
.import_percent_bps16("neg_unrealized_loss_rel_to_own_market_cap", v2)?,
.import_percent_bps32("neg_unrealized_loss_rel_to_own_market_cap", Version::new(3))?,
net_unrealized_pnl_rel_to_own_market_cap: cfg
.import_percent_bps16("net_unrealized_pnl_rel_to_own_market_cap", v2)?,
.import_percent_bps32("net_unrealized_pnl_rel_to_own_market_cap", Version::new(3))?,
})
}
@@ -49,21 +49,21 @@ impl RelativeExtendedOwnMarketCap {
exit,
)?;
self.unrealized_loss_rel_to_own_market_cap
.compute_binary::<Dollars, Dollars, RatioDollarsBp16>(
.compute_binary::<Dollars, Dollars, RatioDollarsBp32>(
max_from,
&unrealized.unrealized_loss.usd.height,
own_market_cap,
exit,
)?;
self.neg_unrealized_loss_rel_to_own_market_cap
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps32>(
max_from,
&unrealized.unrealized_loss.usd.height,
own_market_cap,
exit,
)?;
self.net_unrealized_pnl_rel_to_own_market_cap
.compute_binary::<Dollars, Dollars, RatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, RatioDollarsBps32>(
max_from,
&unrealized.net_unrealized_pnl.usd.height,
own_market_cap,

View File

@@ -1,10 +1,10 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, BasisPointsSigned16, Dollars, Height, Version};
use brk_types::{BasisPoints16, BasisPointsSigned32, Dollars, Height, Version};
use vecdb::{Exit, Rw, StorageMode};
use crate::internal::{
NegRatioDollarsBps16, PercentFromHeight, RatioDollarsBp16, RatioDollarsBps16,
NegRatioDollarsBps32, PercentFromHeight, RatioDollarsBp16, RatioDollarsBps32,
};
use crate::distribution::metrics::{ImportConfig, UnrealizedBase};
@@ -14,14 +14,13 @@ use crate::distribution::metrics::{ImportConfig, UnrealizedBase};
pub struct RelativeExtendedOwnPnl<M: StorageMode = Rw> {
pub unrealized_profit_rel_to_own_gross_pnl: PercentFromHeight<BasisPoints16, M>,
pub unrealized_loss_rel_to_own_gross_pnl: PercentFromHeight<BasisPoints16, M>,
pub neg_unrealized_loss_rel_to_own_gross_pnl: PercentFromHeight<BasisPointsSigned16, M>,
pub net_unrealized_pnl_rel_to_own_gross_pnl: PercentFromHeight<BasisPointsSigned16, M>,
pub neg_unrealized_loss_rel_to_own_gross_pnl: PercentFromHeight<BasisPointsSigned32, M>,
pub net_unrealized_pnl_rel_to_own_gross_pnl: PercentFromHeight<BasisPointsSigned32, M>,
}
impl RelativeExtendedOwnPnl {
pub(crate) fn forced_import(cfg: &ImportConfig) -> Result<Self> {
let v1 = Version::ONE;
let v2 = Version::new(2);
Ok(Self {
unrealized_profit_rel_to_own_gross_pnl: cfg
@@ -29,9 +28,9 @@ impl RelativeExtendedOwnPnl {
unrealized_loss_rel_to_own_gross_pnl: cfg
.import_percent_bp16("unrealized_loss_rel_to_own_gross_pnl", v1)?,
neg_unrealized_loss_rel_to_own_gross_pnl: cfg
.import_percent_bps16("neg_unrealized_loss_rel_to_own_gross_pnl", v1)?,
.import_percent_bps32("neg_unrealized_loss_rel_to_own_gross_pnl", Version::new(2))?,
net_unrealized_pnl_rel_to_own_gross_pnl: cfg
.import_percent_bps16("net_unrealized_pnl_rel_to_own_gross_pnl", v2)?,
.import_percent_bps32("net_unrealized_pnl_rel_to_own_gross_pnl", Version::new(3))?,
})
}
@@ -56,14 +55,14 @@ impl RelativeExtendedOwnPnl {
exit,
)?;
self.neg_unrealized_loss_rel_to_own_gross_pnl
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, NegRatioDollarsBps32>(
max_from,
&unrealized.unrealized_loss.usd.height,
&unrealized.gross_pnl.usd.height,
exit,
)?;
self.net_unrealized_pnl_rel_to_own_gross_pnl
.compute_binary::<Dollars, Dollars, RatioDollarsBps16>(
.compute_binary::<Dollars, Dollars, RatioDollarsBps32>(
max_from,
&unrealized.net_unrealized_pnl.usd.height,
&unrealized.gross_pnl.usd.height,

View File

@@ -1,5 +1,5 @@
use std::{
collections::BTreeMap,
collections::{btree_map::Entry, BTreeMap},
fs,
path::{Path, PathBuf},
};
@@ -9,7 +9,7 @@ use brk_types::{
Cents, CentsCompact, CentsSats, CentsSquaredSats, CostBasisDistribution, Height, Sats,
};
use rustc_hash::FxHashMap;
use vecdb::Bytes;
use vecdb::{Bytes, unlikely};
use super::{CachedUnrealizedState, Percentiles, UnrealizedState};
@@ -187,24 +187,46 @@ impl CostBasisData {
self.percentiles_dirty = true;
let map = &mut self.state.as_mut().unwrap().base.map;
for (cents, (inc, dec)) in self.pending.drain() {
let entry = map.entry(cents).or_default();
*entry += inc;
if *entry < dec {
panic!(
"CostBasisData::apply_pending underflow!\n\
Path: {:?}\n\
Price: {}\n\
Current + increments: {}\n\
Trying to decrement by: {}",
self.pathbuf,
cents.to_dollars(),
entry,
dec
);
}
*entry -= dec;
if *entry == Sats::ZERO {
map.remove(&cents);
match map.entry(cents) {
Entry::Occupied(mut e) => {
*e.get_mut() += inc;
if unlikely(*e.get() < dec) {
panic!(
"CostBasisData::apply_pending underflow!\n\
Path: {:?}\n\
Price: {}\n\
Current + increments: {}\n\
Trying to decrement by: {}",
self.pathbuf,
cents.to_dollars(),
e.get(),
dec
);
}
*e.get_mut() -= dec;
if *e.get() == Sats::ZERO {
e.remove();
}
}
Entry::Vacant(e) => {
if unlikely(inc < dec) {
panic!(
"CostBasisData::apply_pending underflow (new entry)!\n\
Path: {:?}\n\
Price: {}\n\
Increment: {}\n\
Trying to decrement by: {}",
self.pathbuf,
cents.to_dollars(),
inc,
dec
);
}
let val = inc - dec;
if val != Sats::ZERO {
e.insert(val);
}
}
}
}
@@ -213,7 +235,7 @@ impl CostBasisData {
state.cap_raw += self.pending_raw.cap_inc;
// Check for underflow before subtracting
if state.cap_raw.inner() < self.pending_raw.cap_dec.inner() {
if unlikely(state.cap_raw.inner() < self.pending_raw.cap_dec.inner()) {
panic!(
"CostBasisData::apply_pending cap_raw underflow!\n\
Path: {:?}\n\
@@ -231,7 +253,7 @@ impl CostBasisData {
if has_investor_cap {
state.investor_cap_raw += self.pending_raw.investor_cap_inc;
if state.investor_cap_raw.inner() < self.pending_raw.investor_cap_dec.inner() {
if unlikely(state.investor_cap_raw.inner() < self.pending_raw.investor_cap_dec.inner()) {
panic!(
"CostBasisData::apply_pending investor_cap_raw underflow!\n\
Path: {:?}\n\

View File

@@ -0,0 +1,197 @@
use brk_types::StoredF32;
/// Fast expanding percentile tracker using a Fenwick tree (Binary Indexed Tree).
///
/// Values are discretized to BasisPoints32 precision (×10000) and tracked in
/// a fixed-size frequency array with Fenwick prefix sums. This gives:
/// - O(log N) insert (N = tree size, ~18 ops for 200k buckets)
/// - O(log N) percentile query via prefix-sum walk
/// - Exact at BasisPoints32 resolution (no approximation)
#[derive(Clone)]
pub(crate) struct ExpandingPercentiles {
/// Fenwick tree storing cumulative frequency counts.
/// Index 0 is unused (1-indexed). tree[i] covers bucket (i - 1 + offset).
tree: Vec<u64>,
/// Total number of (non-NaN) values recorded so far.
count: u64,
/// Offset so bucket 0 in the tree corresponds to BPS value `offset`.
offset: i32,
/// Number of usable buckets (tree.len() - 1).
size: usize,
}
/// Max BPS value supported. Ratio of 42.0 = 420,000 BPS.
const MAX_BPS: i32 = 430_000;
/// Min BPS value supported (0 = ratio of 0.0).
const MIN_BPS: i32 = 0;
/// One bucket per BPS value in [MIN_BPS, MAX_BPS].
const TREE_SIZE: usize = (MAX_BPS - MIN_BPS) as usize + 1;
impl Default for ExpandingPercentiles {
/// Empty tracker covering the full [MIN_BPS, MAX_BPS] range.
/// Eagerly allocates the whole tree up front (430_002 × u64 ≈ 3.4 MB).
fn default() -> Self {
Self {
tree: vec![0u64; TREE_SIZE + 1], // 1-indexed
count: 0,
offset: MIN_BPS,
size: TREE_SIZE,
}
}
}
impl ExpandingPercentiles {
    /// Number of (non-NaN) values recorded so far.
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Clear all recorded values, keeping the allocated tree.
    pub fn reset(&mut self) {
        self.tree.fill(0);
        self.count = 0;
    }

    /// Convert f32 ratio to bucket index (1-indexed for Fenwick).
    /// Out-of-range values are clamped: ratios below MIN_BPS / 10000
    /// (including negatives) collapse into the first bucket and ratios
    /// above MAX_BPS / 10000 into the last.
    #[inline]
    fn to_bucket(&self, value: f32) -> usize {
        let bps = (value as f64 * 10000.0).round() as i32;
        let clamped = bps.clamp(self.offset, self.offset + self.size as i32 - 1);
        (clamped - self.offset) as usize + 1 // 1-indexed
    }

    /// Bulk-load values in O(n + N) instead of O(n log N).
    /// Builds raw frequency counts, then converts to Fenwick in-place.
    ///
    /// Precondition: the tracker must be fresh or just `reset`. The
    /// in-place Fenwick build below assumes `tree` holds raw counts;
    /// calling this after `add`/`add_bulk` would re-propagate already
    /// Fenwick-encoded sums and corrupt every subsequent query.
    pub fn add_bulk(&mut self, values: &[StoredF32]) {
        debug_assert!(
            self.count == 0,
            "add_bulk requires an empty tracker (tree must hold raw counts)"
        );
        // Build raw frequency counts into tree (treated as flat array)
        for &v in values {
            let v = *v;
            if v.is_nan() {
                continue;
            }
            self.count += 1;
            let bucket = self.to_bucket(v);
            self.tree[bucket] += 1;
        }
        // Convert flat frequencies to Fenwick tree in O(N): push each
        // node's running total into its parent at i + lowbit(i).
        for i in 1..=self.size {
            let parent = i + (i & i.wrapping_neg());
            if parent <= self.size {
                let val = self.tree[i];
                self.tree[parent] += val;
            }
        }
    }

    /// Add a single value. O(log N). NaN values are ignored.
    #[inline]
    pub fn add(&mut self, value: f32) {
        if value.is_nan() {
            return;
        }
        self.count += 1;
        let mut i = self.to_bucket(value);
        while i <= self.size {
            self.tree[i] += 1;
            i += i & i.wrapping_neg(); // i += lowbit(i)
        }
    }

    /// Find the bucket containing the k-th element (1-indexed k).
    /// Uses the standard Fenwick tree walk-down in O(log N).
    /// Caller must ensure 1 <= k <= count.
    #[inline]
    fn kth(&self, mut k: u64) -> usize {
        let mut pos = 0;
        let mut bit = 1 << (usize::BITS - 1 - self.size.leading_zeros()); // highest power of 2 <= size
        while bit > 0 {
            let next = pos + bit;
            if next <= self.size && self.tree[next] < k {
                k -= self.tree[next];
                pos = next;
            }
            bit >>= 1;
        }
        pos + 1 // 1-indexed bucket
    }

    /// Convert bucket index back to BPS u32 value.
    #[inline]
    fn bucket_to_bps(&self, bucket: usize) -> u32 {
        (bucket as i32 - 1 + self.offset) as u32
    }

    /// Compute 6 percentiles in one call. O(6 × log N).
    /// Quantiles q must be in (0, 1). With no recorded values, every
    /// output slot is set to 0.
    pub fn quantiles(&self, qs: &[f64; 6], out: &mut [u32; 6]) {
        if self.count == 0 {
            out.iter_mut().for_each(|o| *o = 0);
            return;
        }
        for (i, &q) in qs.iter().enumerate() {
            // k = ceil(q * count), clamped to [1, count]
            let k = ((q * self.count as f64).ceil() as u64).clamp(1, self.count);
            out[i] = self.bucket_to_bps(self.kth(k));
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Query a single quantile by filling all six requested slots with it.
    fn quantile(ep: &ExpandingPercentiles, q: f64) -> u32 {
        let mut results = [0u32; 6];
        ep.quantiles(&[q; 6], &mut results);
        results[0]
    }

    #[test]
    fn basic_quantiles() {
        let mut ep = ExpandingPercentiles::default();
        // Ratios from 0.001 up to 1.0 in steps of 0.001 (BPS 10..=10000)
        (1..=1000).for_each(|i| ep.add(i as f32 / 1000.0));
        assert_eq!(ep.count(), 1000);
        let median = quantile(&ep, 0.5);
        // Median of the 1..=1000 ratios ≈ 0.5 = 5000 BPS
        assert!((median as i32 - 5000).abs() < 100, "median was {median}");
        let p99 = quantile(&ep, 0.99);
        assert!((p99 as i32 - 9900).abs() < 100, "p99 was {p99}");
        let p01 = quantile(&ep, 0.01);
        assert!((p01 as i32 - 100).abs() < 100, "p01 was {p01}");
    }

    #[test]
    fn empty() {
        let ep = ExpandingPercentiles::default();
        assert_eq!(ep.count(), 0);
        assert_eq!(quantile(&ep, 0.5), 0);
    }

    #[test]
    fn single_value() {
        let mut ep = ExpandingPercentiles::default();
        ep.add(0.42); // 4200 BPS
        // With one sample, every quantile collapses to that sample.
        for q in [0.0001, 0.5, 0.9999] {
            assert_eq!(quantile(&ep, q), 4200);
        }
    }

    #[test]
    fn reset_works() {
        let mut ep = ExpandingPercentiles::default();
        for v in 0..100 {
            ep.add(v as f32 / 100.0);
        }
        assert_eq!(ep.count(), 100);
        ep.reset();
        assert_eq!(ep.count(), 0);
        assert_eq!(quantile(&ep, 0.5), 0);
    }
}

View File

@@ -3,10 +3,10 @@ mod drawdown;
mod sliding_distribution;
mod sliding_median;
pub(crate) mod sliding_window;
mod tdigest;
mod expanding_percentiles;
pub(crate) use aggregation::*;
pub(crate) use drawdown::*;
pub(crate) use sliding_distribution::*;
pub(crate) use sliding_median::*;
pub(crate) use tdigest::*;
pub(crate) use expanding_percentiles::*;

View File

@@ -112,11 +112,13 @@ where
average_out.checked_push_at(i, T::from(window.average()))?;
min_out.checked_push_at(i, T::from(window.min()))?;
max_out.checked_push_at(i, T::from(window.max()))?;
p10_out.checked_push_at(i, T::from(window.percentile(0.10)))?;
p25_out.checked_push_at(i, T::from(window.percentile(0.25)))?;
median_out.checked_push_at(i, T::from(window.percentile(0.50)))?;
p75_out.checked_push_at(i, T::from(window.percentile(0.75)))?;
p90_out.checked_push_at(i, T::from(window.percentile(0.90)))?;
let [p10, p25, p50, p75, p90] =
window.percentiles(&[0.10, 0.25, 0.50, 0.75, 0.90]);
p10_out.checked_push_at(i, T::from(p10))?;
p25_out.checked_push_at(i, T::from(p25))?;
median_out.checked_push_at(i, T::from(p50))?;
p75_out.checked_push_at(i, T::from(p75))?;
p90_out.checked_push_at(i, T::from(p90))?;
}
if average_out.batch_limit_reached() {

View File

@@ -202,4 +202,70 @@ impl SlidingWindowSorted {
self.sorted.kth(lo) * (1.0 - frac) + self.sorted.kth(hi) * frac
}
}
/// Extract multiple percentiles in a single pass through the sorted blocks.
/// Percentiles must be sorted ascending. Returns interpolated values.
///
/// The fixed scratch arrays hold at most two ranks (floor and ceil) per
/// requested percentile, so at most 5 percentiles are supported per call.
pub fn percentiles<const N: usize>(&self, ps: &[f64; N]) -> [f64; N] {
    // Guard the fixed 10-slot scratch capacity explicitly instead of
    // relying on an out-of-bounds panic deep inside the loop.
    assert!(2 * N <= 10, "percentiles supports at most 5 percentiles");
    let len = self.sorted.len();
    if len == 0 {
        return [0.0; N];
    }
    if len == 1 {
        return [self.sorted.kth(0); N];
    }
    // Collect all unique ranks needed (lo and hi for each percentile)
    let last = (len - 1) as f64;
    let mut rank_set: [usize; 10] = [0; 10];
    let mut rank_count = 0;
    let mut lo_hi: [(usize, usize, f64); N] = [(0, 0, 0.0); N];
    for (i, &p) in ps.iter().enumerate() {
        let rank = p * last;
        let lo = rank.floor() as usize;
        let hi = rank.ceil() as usize;
        let frac = rank - lo as f64;
        lo_hi[i] = (lo, hi, frac);
        // Deduplicate by membership, not by comparing against the last
        // insertion only: two neighbouring percentiles whose ranks fall in
        // the same unit interval (e.g. ranks 3.5 then 3.9 — both needing 3
        // and 4) would otherwise re-insert `lo` after `hi`, leaving
        // `rank_set` unsorted and breaking the partition_point lookups and
        // the ordered block walk below.
        if !rank_set[..rank_count].contains(&lo) {
            rank_set[rank_count] = lo;
            rank_count += 1;
        }
        if hi != lo && !rank_set[..rank_count].contains(&hi) {
            rank_set[rank_count] = hi;
            rank_count += 1;
        }
    }
    // Single pass through blocks to get all values
    let ranks = &rank_set[..rank_count];
    let mut values = [0.0f64; 10];
    let mut ri = 0;
    let mut cumulative = 0;
    for block in &self.sorted.blocks {
        // Consume every rank that lands inside the current block before
        // advancing `cumulative`; ranks are strictly increasing.
        while ri < rank_count && ranks[ri] - cumulative < block.len() {
            values[ri] = block[ranks[ri] - cumulative];
            ri += 1;
        }
        cumulative += block.len();
        if ri >= rank_count {
            break;
        }
    }
    // Interpolate results
    let mut out = [0.0; N];
    for (i, &(lo, hi, frac)) in lo_hi.iter().enumerate() {
        if lo == hi {
            // Exact rank — no interpolation needed.
            let ri = ranks.partition_point(|&r| r < lo);
            out[i] = values[ri];
        } else {
            // Linear interpolation between the two neighbouring ranks.
            let lo_ri = ranks.partition_point(|&r| r < lo);
            let hi_ri = ranks.partition_point(|&r| r < hi);
            out[i] = values[lo_ri] * (1.0 - frac) + values[hi_ri] * frac;
        }
    }
    out
}
}

View File

@@ -1,288 +0,0 @@
/// Streaming t-digest for approximate quantile estimation.
///
/// Uses the merging algorithm with scale function k₂: `q * (1 - q)`.
/// Compression parameter δ controls accuracy vs memory (default 100 → ~200 centroids max).
#[derive(Clone)]
pub(crate) struct TDigest {
/// Centroids kept sorted by `mean` (maintained by `add`'s binary search).
centroids: Vec<Centroid>,
/// Total number of (non-NaN) values added.
count: u64,
/// Exact smallest value seen; used for lower-tail interpolation.
min: f64,
/// Exact largest value seen; used for upper-tail interpolation.
max: f64,
/// Compression parameter δ: caps per-centroid weight and total centroid count.
compression: f64,
}
/// A weighted cluster of nearby values: `mean` is the weighted average,
/// `weight` the number of values merged into it.
#[derive(Clone, Copy)]
struct Centroid {
mean: f64,
weight: f64,
}
impl Default for TDigest {
/// Default digest with compression δ = 100 (≈200 centroids max).
fn default() -> Self {
Self::new(100.0)
}
}
impl TDigest {
/// Create an empty digest with the given compression parameter δ
/// (larger δ → more centroids retained → better accuracy, more memory).
pub fn new(compression: f64) -> Self {
Self {
centroids: Vec::new(),
count: 0,
min: f64::INFINITY,
max: f64::NEG_INFINITY,
compression,
}
}
/// Total number of (non-NaN) values added.
pub fn count(&self) -> u64 {
self.count
}
/// Clear all state back to an empty digest, keeping `compression`.
pub fn reset(&mut self) {
self.centroids.clear();
self.count = 0;
self.min = f64::INFINITY;
self.max = f64::NEG_INFINITY;
}
/// Add a single value. NaN values are ignored.
///
/// Merges the value into the nearest centroid when that centroid's
/// k₂ weight budget allows, otherwise inserts a new centroid at the
/// sorted position; compresses once the centroid count exceeds 2·δ.
pub fn add(&mut self, value: f64) {
if value.is_nan() {
return;
}
self.count += 1;
if value < self.min {
self.min = value;
}
if value > self.max {
self.max = value;
}
if self.centroids.is_empty() {
self.centroids.push(Centroid {
mean: value,
weight: 1.0,
});
return;
}
// Single binary search: unclamped position doubles as insert point
// (partial_cmp can only be None for NaN means, treated as Equal).
let search = self.centroids.binary_search_by(|c| {
c.mean
.partial_cmp(&value)
.unwrap_or(std::cmp::Ordering::Equal)
});
let insert_pos = match search {
Ok(i) | Err(i) => i,
};
// Find nearest centroid from insert_pos
let nearest = if insert_pos >= self.centroids.len() {
self.centroids.len() - 1
} else if insert_pos == 0 {
0
} else if (value - self.centroids[insert_pos - 1].mean).abs()
< (value - self.centroids[insert_pos].mean).abs()
{
insert_pos - 1
} else {
insert_pos
};
// Compute quantile of nearest centroid
// (midpoint of its weight span over the total count).
let cum_weight: f64 = self.centroids[..nearest]
.iter()
.map(|c| c.weight)
.sum::<f64>()
+ self.centroids[nearest].weight / 2.0;
let q = cum_weight / self.count as f64;
// k₂ scale function: weight budget shrinks toward the tails,
// keeping tail quantiles more precise.
let limit = (4.0 * self.compression * q * (1.0 - q)).floor().max(1.0);
if self.centroids[nearest].weight + 1.0 <= limit {
// Merge into nearest centroid
let c = &mut self.centroids[nearest];
c.mean = (c.mean * c.weight + value) / (c.weight + 1.0);
c.weight += 1.0;
} else {
// Insert new centroid at sorted position (reuse insert_pos)
self.centroids.insert(
insert_pos,
Centroid {
mean: value,
weight: 1.0,
},
);
}
// Compress if too many centroids
let max_centroids = (2.0 * self.compression) as usize;
if self.centroids.len() > max_centroids {
self.compress();
}
}
/// Merge adjacent centroids in place (single left-to-right pass) while
/// each merged weight stays within the k₂ budget for its quantile
/// position. Preserves sorted order; shrinks `centroids` in place.
fn compress(&mut self) {
if self.centroids.len() <= 1 {
return;
}
let total = self.count as f64;
let mut cum = 0.0;
let mut write_idx = 0;
for read_idx in 1..self.centroids.len() {
let c = self.centroids[read_idx];
let last = &mut self.centroids[write_idx];
let q = (cum + last.weight / 2.0) / total;
let limit = (4.0 * self.compression * q * (1.0 - q)).floor().max(1.0);
if last.weight + c.weight <= limit {
// Weighted merge of `c` into the current write slot.
let new_weight = last.weight + c.weight;
last.mean = (last.mean * last.weight + c.mean * c.weight) / new_weight;
last.weight = new_weight;
} else {
// Budget exceeded: commit the current slot and start a new one.
cum += last.weight;
write_idx += 1;
self.centroids[write_idx] = c;
}
}
self.centroids.truncate(write_idx + 1);
}
/// Batch quantile query in a single pass. `qs` must be sorted ascending.
/// Writes one interpolated estimate per query into `out`; an empty
/// digest yields 0.0, and q ≤ 0 / q ≥ 1 return the exact min / max.
pub fn quantiles(&self, qs: &[f64], out: &mut [f64]) {
if self.centroids.is_empty() {
out.iter_mut().for_each(|o| *o = 0.0);
return;
}
if self.centroids.len() == 1 {
let mean = self.centroids[0].mean;
for (i, &q) in qs.iter().enumerate() {
out[i] = if q <= 0.0 {
self.min
} else if q >= 1.0 {
self.max
} else {
mean
};
}
return;
}
let total = self.count as f64;
// `cum`/`ci` persist across queries — this is why `qs` must be sorted.
let mut cum = 0.0;
let mut ci = 0;
for (qi, &q) in qs.iter().enumerate() {
if q <= 0.0 {
out[qi] = self.min;
continue;
}
if q >= 1.0 {
out[qi] = self.max;
continue;
}
let target = q * total;
// Advance centroids until the current centroid's midpoint exceeds target
while ci < self.centroids.len() {
let mid = cum + self.centroids[ci].weight / 2.0;
if target < mid {
break;
}
cum += self.centroids[ci].weight;
ci += 1;
}
if ci >= self.centroids.len() {
// Past all centroids — interpolate between last centroid and max
let last = self.centroids.last().unwrap();
let last_mid = total - last.weight / 2.0;
let remaining = total - last_mid;
out[qi] = if remaining == 0.0 {
self.max
} else {
last.mean + (self.max - last.mean) * ((target - last_mid) / remaining)
};
} else if ci == 0 {
// Before first centroid — interpolate between min and first centroid
let c = &self.centroids[0];
let first_mid = c.weight / 2.0;
out[qi] = if first_mid == 0.0 {
self.min
} else {
self.min + (c.mean - self.min) * (target / first_mid)
};
} else {
// Between centroid ci-1 and ci
let c = &self.centroids[ci];
let prev = &self.centroids[ci - 1];
let mid = cum + c.weight / 2.0;
let prev_center = cum - prev.weight / 2.0;
let frac = if mid == prev_center {
0.5
} else {
(target - prev_center) / (mid - prev_center)
};
out[qi] = prev.mean + (c.mean - prev.mean) * frac;
}
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Convenience wrapper: query a single quantile from the digest.
    fn quantile(td: &TDigest, q: f64) -> f64 {
        let mut result = [0.0];
        td.quantiles(std::slice::from_ref(&q), &mut result);
        result[0]
    }

    #[test]
    fn basic_quantiles() {
        // Feed 1..=1000 and check that the estimated percentiles land
        // within a small tolerance of the exact order statistics.
        let mut digest = TDigest::default();
        (1..=1000).for_each(|v| digest.add(v as f64));
        assert_eq!(digest.count(), 1000);

        let median = quantile(&digest, 0.5);
        assert!((median - 500.0).abs() < 10.0, "median was {median}");

        let p99 = quantile(&digest, 0.99);
        assert!((p99 - 990.0).abs() < 15.0, "p99 was {p99}");

        let p01 = quantile(&digest, 0.01);
        assert!((p01 - 10.0).abs() < 15.0, "p01 was {p01}");
    }

    #[test]
    fn empty_digest() {
        // An empty digest reports zero count and 0.0 for any quantile.
        let digest = TDigest::default();
        assert_eq!(digest.count(), 0);
        assert_eq!(quantile(&digest, 0.5), 0.0);
    }

    #[test]
    fn single_value() {
        // With exactly one sample, every quantile collapses to that value.
        let mut digest = TDigest::default();
        digest.add(42.0);
        assert_eq!(quantile(&digest, 0.0), 42.0);
        assert_eq!(quantile(&digest, 0.5), 42.0);
        assert_eq!(quantile(&digest, 1.0), 42.0);
    }

    #[test]
    fn reset_works() {
        // After reset() the digest behaves exactly like a fresh one.
        let mut digest = TDigest::default();
        (0..100).for_each(|v| digest.add(v as f64));
        assert_eq!(digest.count(), 100);
        digest.reset();
        assert_eq!(digest.count(), 0);
        assert_eq!(quantile(&digest, 0.5), 0.0);
    }
}

View File

@@ -8,7 +8,7 @@ use vecdb::{
use crate::{
blocks, indexes,
internal::{ComputedFromHeightStdDevExtended, Price, PriceTimesRatioBp32Cents, TDigest},
internal::{ComputedFromHeightStdDevExtended, ExpandingPercentiles, Price, PriceTimesRatioBp32Cents},
};
use super::{super::ComputedFromHeight, ComputedFromHeightRatio};
@@ -36,7 +36,7 @@ pub struct ComputedFromHeightRatioExtension<M: StorageMode = Rw> {
pub ratio_sd_1y: ComputedFromHeightStdDevExtended<M>,
#[traversable(skip)]
tdigest: TDigest,
expanding_pct: ExpandingPercentiles,
}
const VERSION: Version = Version::new(4);
@@ -99,7 +99,7 @@ impl ComputedFromHeightRatioExtension {
ratio_pct5_price: import_price!("ratio_pct5"),
ratio_pct2_price: import_price!("ratio_pct2"),
ratio_pct1_price: import_price!("ratio_pct1"),
tdigest: TDigest::default(),
expanding_pct: ExpandingPercentiles::default(),
})
}
@@ -142,14 +142,12 @@ impl ComputedFromHeightRatioExtension {
let ratio_len = ratio_source.len();
if ratio_len > start {
let tdigest_count = self.tdigest.count() as usize;
if tdigest_count != start {
self.tdigest.reset();
let pct_count = self.expanding_pct.count() as usize;
if pct_count != start {
self.expanding_pct.reset();
if start > 0 {
let historical = ratio_source.collect_range_at(0, start);
for &v in &historical {
self.tdigest.add(*v as f64);
}
self.expanding_pct.add_bulk(&historical);
}
}
@@ -164,11 +162,11 @@ impl ComputedFromHeightRatioExtension {
&mut self.ratio_pct99.bps.height,
];
const PCTS: [f64; 6] = [0.01, 0.02, 0.05, 0.95, 0.98, 0.99];
let mut out = [0.0f64; 6];
let mut out = [0u32; 6];
for (offset, &ratio) in new_ratios.iter().enumerate() {
self.tdigest.add(*ratio as f64);
self.tdigest.quantiles(&PCTS, &mut out);
self.expanding_pct.add(*ratio);
self.expanding_pct.quantiles(&PCTS, &mut out);
let idx = start + offset;
for (vec, &val) in pct_vecs.iter_mut().zip(out.iter()) {
vec.truncate_push_at(idx, BasisPoints32::from(val))?;

View File

@@ -1,12 +1,13 @@
use std::ops::{Add, AddAssign, Div};
use brk_types::{BasisPoints16, BasisPointsSigned16, BasisPointsSigned32, StoredF32};
use brk_types::{BasisPoints16, BasisPoints32, BasisPointsSigned16, BasisPointsSigned32, StoredF32};
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::{Formattable, PcoVecValue, UnaryTransform};
use crate::internal::{
Bp16ToFloat, Bp16ToPercent, Bps16ToFloat, Bps16ToPercent, Bps32ToFloat, Bps32ToPercent,
Bp16ToFloat, Bp16ToPercent, Bp32ToFloat, Bp32ToPercent, Bps16ToFloat, Bps16ToPercent,
Bps32ToFloat, Bps32ToPercent,
};
pub trait ComputedVecValue
@@ -48,6 +49,11 @@ impl BpsType for BasisPoints16 {
type ToPercent = Bp16ToPercent;
}
impl BpsType for BasisPoints32 {
type ToRatio = Bp32ToFloat;
type ToPercent = Bp32ToPercent;
}
impl BpsType for BasisPointsSigned16 {
type ToRatio = Bps16ToFloat;
type ToPercent = Bps16ToPercent;

View File

@@ -48,6 +48,15 @@ impl UnaryTransform<BasisPoints16, StoredF32> for Bp16ToPercent {
}
}
pub struct Bp32ToPercent;
impl UnaryTransform<BasisPoints32, StoredF32> for Bp32ToPercent {
#[inline(always)]
fn apply(bp: BasisPoints32) -> StoredF32 {
StoredF32::from(bp.inner() as f32 / 100.0)
}
}
pub struct Bps16ToPercent;
impl UnaryTransform<BasisPointsSigned16, StoredF32> for Bps16ToPercent {

View File

@@ -10,7 +10,7 @@ pub use arithmetic::{
ReturnI8, ReturnU16,
};
pub use bps::{
Bp16ToFloat, Bp16ToPercent, Bp32ToFloat, Bps16ToFloat, Bps16ToPercent, Bps32ToFloat,
Bp16ToFloat, Bp16ToPercent, Bp32ToFloat, Bp32ToPercent, Bps16ToFloat, Bps16ToPercent, Bps32ToFloat,
Bps32ToPercent,
};
pub use currency::{
@@ -23,8 +23,9 @@ pub use derived::{
RatioCents64, TimesSqrt,
};
pub use ratio::{
NegRatioDollarsBps16, RatioCentsBp16, RatioCentsSignedCentsBps16, RatioCentsSignedDollarsBps16,
RatioDiffCentsBps32, RatioDiffDollarsBps32, RatioDiffF32Bps32, RatioDollarsBp16,
RatioDollarsBp32, RatioDollarsBps16, RatioSatsBp16, RatioU32Bp16, RatioU64Bp16,
NegRatioDollarsBps32, RatioCentsBp16, RatioCentsBp32, RatioCentsSignedCentsBps32,
RatioCentsSignedDollarsBps32, RatioDiffCentsBps32, RatioDiffDollarsBps32, RatioDiffF32Bps32,
RatioDollarsBp16, RatioDollarsBp32, RatioDollarsBps32, RatioSatsBp16, RatioU32Bp16,
RatioU64Bp16,
};
pub use specialized::{BlockCountTarget, OhlcCentsToDollars, OhlcCentsToSats};

View File

@@ -1,6 +1,6 @@
use brk_types::{
BasisPoints16, BasisPoints32, BasisPointsSigned16, BasisPointsSigned32, Cents, CentsSigned,
Dollars, Sats, StoredF32, StoredU32, StoredU64,
BasisPoints16, BasisPoints32, BasisPointsSigned32, Cents, CentsSigned, Dollars, Sats, StoredF32,
StoredU32, StoredU64,
};
use vecdb::BinaryTransform;
@@ -43,6 +43,19 @@ impl BinaryTransform<Cents, Cents, BasisPoints16> for RatioCentsBp16 {
}
}
pub struct RatioCentsBp32;
impl BinaryTransform<Cents, Cents, BasisPoints32> for RatioCentsBp32 {
#[inline(always)]
fn apply(numerator: Cents, denominator: Cents) -> BasisPoints32 {
if denominator == Cents::ZERO {
BasisPoints32::ZERO
} else {
BasisPoints32::from(numerator.inner() as f64 / denominator.inner() as f64)
}
}
}
pub struct RatioU32Bp16;
impl BinaryTransform<StoredU32, StoredU32, BasisPoints16> for RatioU32Bp16 {
@@ -70,57 +83,57 @@ impl BinaryTransform<Dollars, Dollars, BasisPoints16> for RatioDollarsBp16 {
}
}
pub struct RatioDollarsBps16;
pub struct RatioDollarsBps32;
impl BinaryTransform<Dollars, Dollars, BasisPointsSigned16> for RatioDollarsBps16 {
impl BinaryTransform<Dollars, Dollars, BasisPointsSigned32> for RatioDollarsBps32 {
#[inline(always)]
fn apply(numerator: Dollars, denominator: Dollars) -> BasisPointsSigned16 {
fn apply(numerator: Dollars, denominator: Dollars) -> BasisPointsSigned32 {
let ratio = *(numerator / denominator);
if ratio.is_finite() {
BasisPointsSigned16::from(ratio)
BasisPointsSigned32::from(ratio)
} else {
BasisPointsSigned16::ZERO
BasisPointsSigned32::ZERO
}
}
}
pub struct NegRatioDollarsBps16;
pub struct NegRatioDollarsBps32;
impl BinaryTransform<Dollars, Dollars, BasisPointsSigned16> for NegRatioDollarsBps16 {
impl BinaryTransform<Dollars, Dollars, BasisPointsSigned32> for NegRatioDollarsBps32 {
#[inline(always)]
fn apply(numerator: Dollars, denominator: Dollars) -> BasisPointsSigned16 {
fn apply(numerator: Dollars, denominator: Dollars) -> BasisPointsSigned32 {
let ratio = *(numerator / denominator);
if ratio.is_finite() {
BasisPointsSigned16::from(-ratio)
BasisPointsSigned32::from(-ratio)
} else {
BasisPointsSigned16::ZERO
BasisPointsSigned32::ZERO
}
}
}
pub struct RatioCentsSignedCentsBps16;
pub struct RatioCentsSignedCentsBps32;
impl BinaryTransform<CentsSigned, Cents, BasisPointsSigned16> for RatioCentsSignedCentsBps16 {
impl BinaryTransform<CentsSigned, Cents, BasisPointsSigned32> for RatioCentsSignedCentsBps32 {
#[inline(always)]
fn apply(numerator: CentsSigned, denominator: Cents) -> BasisPointsSigned16 {
fn apply(numerator: CentsSigned, denominator: Cents) -> BasisPointsSigned32 {
if denominator == Cents::ZERO {
BasisPointsSigned16::ZERO
BasisPointsSigned32::ZERO
} else {
BasisPointsSigned16::from(numerator.inner() as f64 / denominator.inner() as f64)
BasisPointsSigned32::from(numerator.inner() as f64 / denominator.inner() as f64)
}
}
}
pub struct RatioCentsSignedDollarsBps16;
pub struct RatioCentsSignedDollarsBps32;
impl BinaryTransform<CentsSigned, Dollars, BasisPointsSigned16> for RatioCentsSignedDollarsBps16 {
impl BinaryTransform<CentsSigned, Dollars, BasisPointsSigned32> for RatioCentsSignedDollarsBps32 {
#[inline(always)]
fn apply(numerator: CentsSigned, denominator: Dollars) -> BasisPointsSigned16 {
fn apply(numerator: CentsSigned, denominator: Dollars) -> BasisPointsSigned32 {
let d: f64 = denominator.into();
if d > 0.0 {
BasisPointsSigned16::from(numerator.inner() as f64 / 100.0 / d)
BasisPointsSigned32::from(numerator.inner() as f64 / 100.0 / d)
} else {
BasisPointsSigned16::ZERO
BasisPointsSigned32::ZERO
}
}
}