global: snapshot

This commit is contained in:
nym21
2025-12-13 10:52:00 +01:00
parent 3526a177fc
commit 158b0254ed
73 changed files with 2230 additions and 195 deletions

View File

@@ -150,10 +150,10 @@ pub fn read_runs(crate_path: &Path, filename: &str) -> Result<Vec<Run>> {
}
let csv_path = run_path.join(filename);
if csv_path.exists() {
if let Ok(data) = read_csv(&csv_path) {
runs.push(Run { id: run_id, data });
}
if csv_path.exists()
&& let Ok(data) = read_csv(&csv_path)
{
runs.push(Run { id: run_id, data });
}
}
@@ -180,14 +180,14 @@ pub fn read_dual_runs(crate_path: &Path, filename: &str) -> Result<Vec<DualRun>>
}
let csv_path = run_path.join(filename);
if csv_path.exists() {
if let Ok((primary, secondary)) = read_dual_csv(&csv_path) {
runs.push(DualRun {
id: run_id,
primary,
secondary,
});
}
if csv_path.exists()
&& let Ok((primary, secondary)) = read_dual_csv(&csv_path)
{
runs.push(DualRun {
id: run_id,
primary,
secondary,
});
}
}
@@ -219,19 +219,18 @@ fn read_dual_csv(path: &Path) -> Result<(Vec<DataPoint>, Vec<DataPoint>)> {
for line in content.lines().skip(1) {
let mut parts = line.split(',');
if let (Some(ts), Some(v1), Some(v2)) = (parts.next(), parts.next(), parts.next()) {
if let (Ok(timestamp_ms), Ok(val1), Ok(val2)) =
if let (Some(ts), Some(v1), Some(v2)) = (parts.next(), parts.next(), parts.next())
&& let (Ok(timestamp_ms), Ok(val1), Ok(val2)) =
(ts.parse(), v1.parse::<f64>(), v2.parse::<f64>())
{
primary.push(DataPoint {
timestamp_ms,
value: val1,
});
secondary.push(DataPoint {
timestamp_ms,
value: val2,
});
}
{
primary.push(DataPoint {
timestamp_ms,
value: val1,
});
secondary.push(DataPoint {
timestamp_ms,
value: val2,
});
}
}

View File

@@ -36,16 +36,22 @@ fn run() -> Result<()> {
dbg!(
indexer
.vecs
.tx.txindex_to_txid
.tx
.txindex_to_txid
.read_once(txindex)
.unwrap()
.to_string()
);
let first_txinindex = indexer.vecs.tx.txindex_to_first_txinindex.read_once(txindex)?;
let first_txinindex = indexer
.vecs
.tx
.txindex_to_first_txinindex
.read_once(txindex)?;
dbg!(first_txinindex);
let first_txoutindex = indexer
.vecs
.tx.txindex_to_first_txoutindex
.tx
.txindex_to_first_txoutindex
.read_once(txindex)?;
dbg!(first_txoutindex);
let input_count = *computer.indexes.txindex_to_input_count.read_once(txindex)?;
@@ -55,35 +61,42 @@ fn run() -> Result<()> {
.txindex_to_output_count
.read_once(txindex)?;
dbg!(output_count);
dbg!(
let _ = dbg!(
computer
.indexes
.txinindex_to_txoutindex
.read_once(first_txinindex)
);
dbg!(
let _ = dbg!(
computer
.indexes
.txinindex_to_txoutindex
.read_once(first_txinindex + 1)
);
dbg!(computer.chain.txinindex_to_value.read_once(first_txinindex));
dbg!(
let _ = dbg!(computer.chain.txinindex_to_value.read_once(first_txinindex));
let _ = dbg!(
computer
.chain
.txinindex_to_value
.read_once(first_txinindex + 1)
);
dbg!(indexer.vecs.txout.txoutindex_to_value.read_once(first_txoutindex));
dbg!(
let _ = dbg!(
indexer
.vecs
.txout.txoutindex_to_value
.txout
.txoutindex_to_value
.read_once(first_txoutindex)
);
let _ = dbg!(
indexer
.vecs
.txout
.txoutindex_to_value
.read_once(first_txoutindex + 1)
);
dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
dbg!(computer.chain.txindex_to_output_value.read_once(txindex));
let _ = dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
let _ = dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
let _ = dbg!(computer.chain.txindex_to_output_value.read_once(txindex));
// dbg!(computer.indexes.txindex_to_txindex.ge(txindex));
dbg!(
computer

View File

@@ -7,7 +7,7 @@ use brk_types::{
Bitcoin, CheckedSub, DateIndex, DecadeIndex, DifficultyEpoch, Dollars, FeeRate, HalvingEpoch,
Height, MonthIndex, ONE_DAY_IN_SEC_F64, QuarterIndex, Sats, SemesterIndex, StoredBool,
StoredF32, StoredF64, StoredU32, StoredU64, Timestamp, TxInIndex, TxIndex, TxOutIndex,
TxVersion, Version, WeekIndex, Weight, YearIndex,
TxVersion, VSize, Version, WeekIndex, Weight, YearIndex,
};
use vecdb::{
Database, EagerVec, Exit, GenericStoredVec, ImportableVec, IterableCloneableVec, IterableVec,
@@ -95,14 +95,14 @@ pub struct Vecs {
pub indexes_to_tx_v1: ComputedVecsFromHeight<StoredU64>,
pub indexes_to_tx_v2: ComputedVecsFromHeight<StoredU64>,
pub indexes_to_tx_v3: ComputedVecsFromHeight<StoredU64>,
pub indexes_to_tx_vsize: ComputedVecsFromTxindex<StoredU64>,
pub indexes_to_tx_vsize: ComputedVecsFromTxindex<VSize>,
pub indexes_to_tx_weight: ComputedVecsFromTxindex<Weight>,
pub indexes_to_unknownoutput_count: ComputedVecsFromHeight<StoredU64>,
pub txinindex_to_value: EagerVec<PcoVec<TxInIndex, Sats>>,
pub indexes_to_input_count: ComputedVecsFromTxindex<StoredU64>,
pub txindex_to_is_coinbase: LazyVecFrom2<TxIndex, StoredBool, TxIndex, Height, Height, TxIndex>,
pub indexes_to_output_count: ComputedVecsFromTxindex<StoredU64>,
pub txindex_to_vsize: LazyVecFrom1<TxIndex, StoredU64, TxIndex, Weight>,
pub txindex_to_vsize: LazyVecFrom1<TxIndex, VSize, TxIndex, Weight>,
pub txindex_to_weight: LazyVecFrom2<TxIndex, Weight, TxIndex, StoredU32, TxIndex, StoredU32>,
pub txindex_to_fee: EagerVec<PcoVec<TxIndex, Sats>>,
pub txindex_to_fee_rate: EagerVec<PcoVec<TxIndex, FeeRate>>,
@@ -269,11 +269,7 @@ impl Vecs {
"vsize",
version + Version::ZERO,
txindex_to_weight.boxed_clone(),
|index: TxIndex, iter| {
iter.get(index).map(|weight| {
StoredU64::from(bitcoin::Weight::from(weight).to_vbytes_ceil() as usize)
})
},
|index: TxIndex, iter| iter.get(index).map(VSize::from),
);
let txindex_to_is_coinbase = LazyVecFrom2::init(

View File

@@ -56,7 +56,7 @@ impl ComputedValueVecsFromTxindex {
let bitcoin_txindex = LazyVecFrom1::init(
&name_btc,
version + VERSION,
source_vec.map_or_else(|| sats.txindex.u().boxed_clone(), |s| s),
source_vec.unwrap_or_else(|| sats.txindex.u().boxed_clone()),
|txindex: TxIndex, iter| iter.get_at(txindex.to_usize()).map(Bitcoin::from),
);

View File

@@ -22,7 +22,7 @@ mod market;
mod pools;
mod price;
mod stateful;
mod stateful_new;
// mod stateful_new;
mod states;
mod traits;
mod utils;

View File

@@ -64,7 +64,7 @@ mod address_cohorts;
mod address_indexes;
mod addresstype;
mod common;
mod flushable;
// mod flushable;
mod range_map;
mod readers;
mod r#trait;
@@ -102,6 +102,7 @@ pub struct Vecs {
// States
// ---
pub chain_state: BytesVec<Height, SupplyState>,
pub txoutindex_to_txinindex: BytesVec<TxOutIndex, TxInIndex>,
pub any_address_indexes: AnyAddressIndexesVecs,
pub addresses_data: AddressesDataVecs,
pub utxo_cohorts: utxo_cohorts::Vecs,
@@ -179,6 +180,10 @@ impl Vecs {
ImportOptions::new(&db, "chain", v0)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,
txoutindex_to_txinindex: BytesVec::forced_import_with(
ImportOptions::new(&db, "txinindex", v0)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,
height_to_unspendable_supply: EagerVec::forced_import(&db, "unspendable_supply", v0)?,
indexes_to_unspendable_supply: ComputedValueVecsFromHeight::forced_import(
@@ -382,13 +387,20 @@ impl Vecs {
.map(|price| price.timeindexes_to_price_close.dateindex.u());
let height_to_date_fixed = &indexes.height_to_date_fixed;
let height_to_first_p2aaddressindex = &indexer.vecs.address.height_to_first_p2aaddressindex;
let height_to_first_p2pk33addressindex = &indexer.vecs.address.height_to_first_p2pk33addressindex;
let height_to_first_p2pk65addressindex = &indexer.vecs.address.height_to_first_p2pk65addressindex;
let height_to_first_p2pkhaddressindex = &indexer.vecs.address.height_to_first_p2pkhaddressindex;
let height_to_first_p2shaddressindex = &indexer.vecs.address.height_to_first_p2shaddressindex;
let height_to_first_p2traddressindex = &indexer.vecs.address.height_to_first_p2traddressindex;
let height_to_first_p2wpkhaddressindex = &indexer.vecs.address.height_to_first_p2wpkhaddressindex;
let height_to_first_p2wshaddressindex = &indexer.vecs.address.height_to_first_p2wshaddressindex;
let height_to_first_p2pk33addressindex =
&indexer.vecs.address.height_to_first_p2pk33addressindex;
let height_to_first_p2pk65addressindex =
&indexer.vecs.address.height_to_first_p2pk65addressindex;
let height_to_first_p2pkhaddressindex =
&indexer.vecs.address.height_to_first_p2pkhaddressindex;
let height_to_first_p2shaddressindex =
&indexer.vecs.address.height_to_first_p2shaddressindex;
let height_to_first_p2traddressindex =
&indexer.vecs.address.height_to_first_p2traddressindex;
let height_to_first_p2wpkhaddressindex =
&indexer.vecs.address.height_to_first_p2wpkhaddressindex;
let height_to_first_p2wshaddressindex =
&indexer.vecs.address.height_to_first_p2wshaddressindex;
let height_to_first_txindex = &indexer.vecs.tx.height_to_first_txindex;
let height_to_txindex_count = chain.indexes_to_tx_count.height.u();
let height_to_first_txinindex = &indexer.vecs.txin.height_to_first_txinindex;
@@ -489,6 +501,7 @@ impl Vecs {
.unwrap_or_default(),
)
.min(chain_state_starting_height)
.min(Height::from(self.txoutindex_to_txinindex.stamp()).incremented())
.min(self.any_address_indexes.min_stamped_height())
.min(self.addresses_data.min_stamped_height())
.min(Height::from(self.height_to_unspendable_supply.len()))
@@ -507,6 +520,7 @@ impl Vecs {
let starting_height = if starting_height.is_not_zero() {
let mut set = [self.chain_state.rollback_before(stamp)?]
.into_iter()
.chain([self.txoutindex_to_txinindex.rollback_before(stamp)?])
.chain(self.any_address_indexes.rollback_before(stamp)?)
.chain(self.addresses_data.rollback_before(stamp)?)
.map(Height::from)
@@ -589,6 +603,7 @@ impl Vecs {
chain_state = vec![];
self.txoutindex_to_txinindex.reset()?;
self.any_address_indexes.reset()?;
self.addresses_data.reset()?;
@@ -760,6 +775,7 @@ impl Vecs {
addresstype_to_typedindex_to_sent_data,
mut stored_or_new_addresstype_to_typeindex_to_addressdatawithsource,
mut combined_txindex_vecs,
txoutindex_to_txinindex_updates,
) = thread::scope(|scope| {
scope.spawn(|| {
self.utxo_cohorts
@@ -860,6 +876,7 @@ impl Vecs {
addresstype_to_typedindex_to_sent_data,
sending_addresstype_to_typeindex_to_addressdatawithsource,
input_txindex_vecs,
txoutindex_to_txinindex_updates,
) = (first_txinindex + 1..first_txinindex + usize::from(input_count))
.into_par_iter()
.map(|i| {
@@ -884,7 +901,15 @@ impl Vecs {
let prev_height = *txoutindex_range_to_height.get(txoutindex).unwrap();
if input_type.is_not_address() {
return (txindex, prev_height, value, input_type, None);
return (
txinindex,
txoutindex,
txindex,
prev_height,
value,
input_type,
None,
);
}
let typeindex = txoutindex_to_typeindex
@@ -902,6 +927,8 @@ impl Vecs {
);
(
txinindex,
txoutindex,
txindex,
prev_height,
value,
@@ -916,6 +943,7 @@ impl Vecs {
HeightToAddressTypeToVec::<(TypeIndex, Sats)>::default(),
AddressTypeToTypeIndexMap::default(),
AddressTypeToTypeIndexMap::<TxIndexVec>::default(),
Vec::<(TxOutIndex, TxInIndex)>::new(),
)
},
|(
@@ -923,8 +951,11 @@ impl Vecs {
mut height_to_addresstype_to_typedindex_to_data,
mut addresstype_to_typeindex_to_addressdatawithsource,
mut txindex_vecs,
mut txoutindex_to_txinindex_updates,
),
(
txinindex,
txoutindex,
txindex,
prev_height,
value,
@@ -936,6 +967,8 @@ impl Vecs {
.or_default()
.iterate(value, output_type);
txoutindex_to_txinindex_updates.push((txoutindex, txinindex));
if let Some((typeindex, addressdata_opt)) =
typeindex_with_addressdata_opt
{
@@ -966,6 +999,7 @@ impl Vecs {
height_to_addresstype_to_typedindex_to_data,
addresstype_to_typeindex_to_addressdatawithsource,
txindex_vecs,
txoutindex_to_txinindex_updates,
)
},
)
@@ -976,6 +1010,7 @@ impl Vecs {
HeightToAddressTypeToVec::<(TypeIndex, Sats)>::default(),
AddressTypeToTypeIndexMap::default(),
AddressTypeToTypeIndexMap::<TxIndexVec>::default(),
Vec::<(TxOutIndex, TxInIndex)>::new(),
)
},
|(
@@ -983,12 +1018,14 @@ impl Vecs {
addresstype_to_typedindex_to_data,
addresstype_to_typeindex_to_addressdatawithsource,
txindex_vecs,
txoutindex_to_txinindex_updates,
),
(
height_to_transacted2,
addresstype_to_typedindex_to_data2,
addresstype_to_typeindex_to_addressdatawithsource2,
txindex_vecs2,
txoutindex_to_txinindex_updates2,
)| {
let (mut height_to_transacted, height_to_transacted_consumed) =
if height_to_transacted.len() > height_to_transacted2.len() {
@@ -1028,12 +1065,32 @@ impl Vecs {
.merge_mut(v);
});
let (
mut txoutindex_to_txinindex_updates,
txoutindex_to_txinindex_updates_consumed,
) = if txoutindex_to_txinindex_updates.len()
> txoutindex_to_txinindex_updates2.len()
{
(
txoutindex_to_txinindex_updates,
txoutindex_to_txinindex_updates2,
)
} else {
(
txoutindex_to_txinindex_updates2,
txoutindex_to_txinindex_updates,
)
};
txoutindex_to_txinindex_updates
.extend(txoutindex_to_txinindex_updates_consumed);
(
height_to_transacted,
addresstype_to_typedindex_to_data,
addresstype_to_typeindex_to_addressdatawithsource
.merge(addresstype_to_typeindex_to_addressdatawithsource2),
txindex_vecs.merge_vec(txindex_vecs2),
txoutindex_to_txinindex_updates,
)
},
);
@@ -1051,6 +1108,7 @@ impl Vecs {
addresstype_to_typedindex_to_sent_data,
addresstype_to_typeindex_to_addressdatawithsource,
combined_txindex_vecs,
txoutindex_to_txinindex_updates,
)
});
@@ -1159,6 +1217,12 @@ impl Vecs {
self.utxo_cohorts.send(height_to_sent, &mut chain_state);
});
// Update txoutindex_to_txinindex
self.update_txoutindex_to_txinindex(
usize::from(output_count),
txoutindex_to_txinindex_updates,
)?;
self.height_to_unspendable_supply
.truncate_push(height, unspendable_supply)?;
@@ -1599,6 +1663,8 @@ impl Vecs {
let stamp = Stamp::from(height);
self.txoutindex_to_txinindex
.stamped_flush_maybe_with_changes(stamp, with_changes)?;
self.any_address_indexes
.stamped_flush_maybe_with_changes(stamp, with_changes)?;
self.addresses_data
@@ -1613,4 +1679,24 @@ impl Vecs {
Ok(())
}
/// Update txoutindex_to_txinindex for a block.
///
/// 1. Push UNSPENT for all new outputs in the block
/// 2. Update spent outputs with their spending txinindex
pub fn update_txoutindex_to_txinindex(
&mut self,
output_count: usize,
updates: Vec<(TxOutIndex, TxInIndex)>,
) -> Result<()> {
// Push UNSPENT for all new outputs in this block
for _ in 0..output_count {
self.txoutindex_to_txinindex.push(TxInIndex::UNSPENT);
}
// Update spent outputs with their spending txinindex
for (txoutindex, txinindex) in updates {
self.txoutindex_to_txinindex.update(txoutindex, txinindex)?;
}
Ok(())
}
}

View File

@@ -13,7 +13,7 @@ use std::thread;
use brk_error::Result;
use brk_grouper::ByAddressType;
use brk_indexer::Indexer;
use brk_types::{DateIndex, Dollars, Height, OutputType, Sats, Timestamp, TypeIndex};
use brk_types::{DateIndex, Dollars, Height, OutputType, Sats, Timestamp, TxInIndex, TxOutIndex, TypeIndex};
use log::info;
use rayon::prelude::*;
use vecdb::{AnyStoredVec, Exit, GenericStoredVec, IterableVec, TypedVecIterator, VecIndex};
@@ -248,6 +248,7 @@ pub fn process_blocks(
sent_data: Default::default(),
address_data: Default::default(),
txindex_vecs: Default::default(),
txoutindex_to_txinindex_updates: Default::default(),
}
};
@@ -346,6 +347,12 @@ pub fn process_blocks(
vecs.utxo_cohorts.send(height_to_sent, chain_state);
});
// Update txoutindex_to_txinindex
vecs.update_txoutindex_to_txinindex(
output_count,
inputs_result.txoutindex_to_txinindex_updates,
)?;
// Push to height-indexed vectors
vecs.height_to_unspendable_supply
.truncate_push(height, unspendable_supply)?;
@@ -503,7 +510,9 @@ fn flush_checkpoint(
exit,
)?;
// Flush chain state with stamp
// Flush chain state and txoutindex_to_txinindex with stamp
vecs.txoutindex_to_txinindex
.stamped_flush_with_changes(height.into())?;
vecs.chain_state.stamped_flush_with_changes(height.into())?;
Ok(())

View File

@@ -33,6 +33,8 @@ pub struct InputsResult {
pub address_data: AddressTypeToTypeIndexMap<LoadedAddressDataWithSource>,
/// Transaction indexes per address for tx_count tracking.
pub txindex_vecs: AddressTypeToTypeIndexMap<TxIndexVec>,
/// Updates to txoutindex_to_txinindex: (spent txoutindex, spending txinindex).
pub txoutindex_to_txinindex_updates: Vec<(TxOutIndex, TxInIndex)>,
}
/// Process inputs (spent UTXOs) for a block in parallel.
@@ -64,7 +66,7 @@ pub fn process_inputs(
any_address_indexes: &AnyAddressIndexesVecs,
addresses_data: &AddressesDataVecs,
) -> InputsResult {
let (height_to_sent, sent_data, address_data, txindex_vecs) = (first_txinindex
let (height_to_sent, sent_data, address_data, txindex_vecs, txoutindex_to_txinindex_updates) = (first_txinindex
..first_txinindex + input_count)
.into_par_iter()
.map(|i| {
@@ -88,7 +90,7 @@ pub fn process_inputs(
// Non-address inputs don't need typeindex or address lookup
if input_type.is_not_address() {
return (prev_height, value, input_type, None);
return (txinindex, txoutindex, prev_height, value, input_type, None);
}
let typeindex =
@@ -107,6 +109,8 @@ pub fn process_inputs(
);
(
txinindex,
txoutindex,
prev_height,
value,
input_type,
@@ -120,15 +124,18 @@ pub fn process_inputs(
HeightToAddressTypeToVec::default(),
AddressTypeToTypeIndexMap::<LoadedAddressDataWithSource>::default(),
AddressTypeToTypeIndexMap::<TxIndexVec>::default(),
Vec::<(TxOutIndex, TxInIndex)>::new(),
)
},
|(mut height_to_sent, mut sent_data, mut address_data, mut txindex_vecs),
(prev_height, value, output_type, addr_info)| {
|(mut height_to_sent, mut sent_data, mut address_data, mut txindex_vecs, mut txoutindex_to_txinindex_updates),
(txinindex, txoutindex, prev_height, value, output_type, addr_info)| {
height_to_sent
.entry(prev_height)
.or_default()
.iterate(value, output_type);
txoutindex_to_txinindex_updates.push((txoutindex, txinindex));
if let Some((typeindex, txindex, value, addr_data_opt)) = addr_info {
sent_data
.entry(prev_height)
@@ -149,7 +156,7 @@ pub fn process_inputs(
.push(txindex);
}
(height_to_sent, sent_data, address_data, txindex_vecs)
(height_to_sent, sent_data, address_data, txindex_vecs, txoutindex_to_txinindex_updates)
},
)
.reduce(
@@ -159,9 +166,10 @@ pub fn process_inputs(
HeightToAddressTypeToVec::default(),
AddressTypeToTypeIndexMap::<LoadedAddressDataWithSource>::default(),
AddressTypeToTypeIndexMap::<TxIndexVec>::default(),
Vec::<(TxOutIndex, TxInIndex)>::new(),
)
},
|(mut h1, mut s1, a1, tx1), (h2, s2, a2, tx2)| {
|(mut h1, mut s1, a1, tx1, updates1), (h2, s2, a2, tx2, updates2)| {
// Merge height_to_sent maps
for (k, v) in h2 {
*h1.entry(k).or_default() += v;
@@ -170,7 +178,15 @@ pub fn process_inputs(
// Merge sent_data maps
s1.merge_mut(s2);
(h1, s1, a1.merge(a2), tx1.merge_vec(tx2))
// Merge txoutindex_to_txinindex updates (extend longest with shortest)
let (mut updates, updates_consumed) = if updates1.len() > updates2.len() {
(updates1, updates2)
} else {
(updates2, updates1)
};
updates.extend(updates_consumed);
(h1, s1, a1.merge(a2), tx1.merge_vec(tx2), updates)
},
);
@@ -179,6 +195,7 @@ pub fn process_inputs(
sent_data,
address_data,
txindex_vecs,
txoutindex_to_txinindex_updates,
}
}

View File

@@ -5,10 +5,10 @@ use std::path::Path;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_traversable::Traversable;
use brk_types::{Dollars, Height, Sats, StoredU64, Version};
use brk_types::{Dollars, Height, Sats, StoredU64, TxInIndex, TxOutIndex, Version};
use vecdb::{
BytesVec, Database, EagerVec, Exit, ImportableVec, IterableCloneableVec, LazyVecFrom1,
PAGE_SIZE, PcoVec,
AnyStoredVec, BytesVec, Database, EagerVec, Exit, ImportableVec, IterableCloneableVec,
LazyVecFrom1, PAGE_SIZE, PcoVec,
};
use crate::{
@@ -35,6 +35,7 @@ pub struct Vecs {
// States
// ---
pub chain_state: BytesVec<Height, SupplyState>,
pub txoutindex_to_txinindex: BytesVec<TxOutIndex, TxInIndex>,
pub any_address_indexes: AnyAddressIndexesVecs,
pub addresses_data: AddressesDataVecs,
pub utxo_cohorts: UTXOCohorts,
@@ -83,6 +84,10 @@ impl Vecs {
vecdb::ImportOptions::new(&db, "chain", v0)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,
txoutindex_to_txinindex: BytesVec::forced_import_with(
vecdb::ImportOptions::new(&db, "txinindex", v0)
.with_saved_stamped_changes(SAVED_STAMPED_CHANGES),
)?,
height_to_unspendable_supply: EagerVec::forced_import(&db, "unspendable_supply", v0)?,
height_to_opreturn_supply: EagerVec::forced_import(&db, "opreturn_supply", v0)?,
@@ -185,9 +190,7 @@ impl Vecs {
starting_indexes: &mut Indexes,
exit: &Exit,
) -> Result<()> {
use super::compute::{
StartMode, determine_start_mode, process_blocks,
};
use super::compute::{StartMode, determine_start_mode, process_blocks};
use crate::states::BlockState;
use vecdb::{AnyVec, GenericStoredVec, Stamp, TypedVecIterator, VecIndex};
@@ -201,6 +204,7 @@ impl Vecs {
let stateful_min = utxo_min
.min(address_min)
.min(Height::from(self.chain_state.len()))
.min(Height::from(self.txoutindex_to_txinindex.stamp()).incremented())
.min(self.any_address_indexes.min_stamped_height())
.min(self.addresses_data.min_stamped_height())
.min(Height::from(self.height_to_unspendable_supply.len()))
@@ -215,6 +219,7 @@ impl Vecs {
// Rollback state vectors
let _ = self.chain_state.rollback_before(stamp);
let _ = self.txoutindex_to_txinindex.rollback_before(stamp);
let _ = self.any_address_indexes.rollback_before(stamp);
let _ = self.addresses_data.rollback_before(stamp);
@@ -252,6 +257,7 @@ impl Vecs {
}
StartMode::Fresh => {
// Reset all state
self.txoutindex_to_txinindex.reset()?;
self.any_address_indexes.reset()?;
self.addresses_data.reset()?;
@@ -271,7 +277,14 @@ impl Vecs {
};
// 3. Get last height from indexer
let last_height = Height::from(indexer.vecs.block.height_to_blockhash.len().saturating_sub(1));
let last_height = Height::from(
indexer
.vecs
.block
.height_to_blockhash
.len()
.saturating_sub(1),
);
// 4. Process blocks
if starting_height <= last_height {
@@ -401,4 +414,24 @@ impl Vecs {
self.db.compact()?;
Ok(())
}
/// Update txoutindex_to_txinindex for a block.
///
/// 1. Push UNSPENT for all new outputs in the block
/// 2. Update spent outputs with their spending txinindex
pub fn update_txoutindex_to_txinindex(
&mut self,
output_count: usize,
updates: Vec<(TxOutIndex, TxInIndex)>,
) -> Result<()> {
// Push UNSPENT for all new outputs in this block
for _ in 0..output_count {
self.txoutindex_to_txinindex.push(TxInIndex::UNSPENT);
}
// Update spent outputs with their spending txinindex
for (txoutindex, txinindex) in updates {
self.txoutindex_to_txinindex.update(txoutindex, txinindex)?;
}
Ok(())
}
}

View File

@@ -28,7 +28,7 @@ pub fn init(path: Option<&Path>) -> io::Result<()> {
}
Builder::from_env(Env::default().default_filter_or(
"info,bitcoin=off,bitcoincore-rpc=off,fjall=off,brk_fjall=off,lsm_tree=off,rolldown=off,rmcp=off,brk_rmcp=off,tracing=off,aide=off,rustls=off",
"info,bitcoin=off,bitcoincore-rpc=off,fjall=off,brk_fjall=off,lsm_tree=off,brk_rolldown=off,rolldown=off,rmcp=off,brk_rmcp=off,tracing=off,aide=off,rustls=off",
// "debug,fjall=trace,bitcoin=off,bitcoincore-rpc=off,rolldown=off,rmcp=off,brk_rmcp=off,tracing=off,aide=off,rustls=off",
))
.format(move |buf, record| {
@@ -94,11 +94,11 @@ fn write(
dash: impl Display,
args: impl Display,
) -> Result<(), std::io::Error> {
writeln!(buf, "{date_time} {dash} {level} {args}")
// writeln!(buf, "{date_time} {dash} {level} {args}")
// Don't remove, used to know the target of unwanted logs
// writeln!(
// buf,
// "{} {} {} {} {}",
// date_time, _target, level, dash, args
// )
writeln!(
buf,
"{} {} {} {} {}",
date_time, _target, level, dash, args
)
}

View File

@@ -1,8 +1,16 @@
use std::{sync::Arc, thread, time::Duration};
use std::{
collections::BTreeMap,
sync::Arc,
thread,
time::Duration,
};
use brk_error::Result;
use brk_rpc::Client;
use brk_types::{AddressBytes, AddressMempoolStats, Transaction, Txid};
use brk_types::{
AddressBytes, AddressMempoolStats, FeeRate, MempoolInfo, RecommendedFees, TxWithHex, Txid,
VSize,
};
use derive_deref::Deref;
use log::error;
use parking_lot::{RwLock, RwLockReadGuard};
@@ -10,6 +18,9 @@ use rustc_hash::{FxHashMap, FxHashSet};
const MAX_FETCHES_PER_CYCLE: usize = 10_000;
/// Target block vsize in vbytes: the 4,000,000 weight-unit block limit divided by 4 gives 1,000,000 vbytes
const BLOCK_VSIZE_TARGET: u64 = 1_000_000;
///
/// Mempool monitor
///
@@ -26,7 +37,11 @@ impl Mempool {
pub struct MempoolInner {
client: Client,
txs: RwLock<FxHashMap<Txid, Transaction>>,
info: RwLock<MempoolInfo>,
fees: RwLock<RecommendedFees>,
/// Map of fee rate -> total vsize at that fee rate, used for fee estimation
fee_rates: RwLock<BTreeMap<FeeRate, VSize>>,
txs: RwLock<FxHashMap<Txid, TxWithHex>>,
addresses: RwLock<FxHashMap<AddressBytes, (AddressMempoolStats, FxHashSet<Txid>)>>,
}
@@ -34,12 +49,23 @@ impl MempoolInner {
pub fn new(client: Client) -> Self {
Self {
client,
info: RwLock::new(MempoolInfo::default()),
fees: RwLock::new(RecommendedFees::default()),
fee_rates: RwLock::new(BTreeMap::new()),
txs: RwLock::new(FxHashMap::default()),
addresses: RwLock::new(FxHashMap::default()),
}
}
pub fn get_txs(&self) -> RwLockReadGuard<'_, FxHashMap<Txid, Transaction>> {
pub fn get_info(&self) -> MempoolInfo {
self.info.read().clone()
}
pub fn get_fees(&self) -> RecommendedFees {
self.fees.read().clone()
}
pub fn get_txs(&self) -> RwLockReadGuard<'_, FxHashMap<Txid, TxWithHex>> {
self.txs.read()
}
@@ -84,13 +110,17 @@ impl MempoolInner {
})
.collect::<FxHashMap<_, _>>();
let mut info = self.info.write();
let mut txs = self.txs.write();
let mut addresses = self.addresses.write();
txs.retain(|txid, tx| {
txs.retain(|txid, tx_with_hex| {
if txids.contains(txid) {
return true;
}
let tx = tx_with_hex.tx();
info.remove(tx);
tx.input
.iter()
.flat_map(|txin| txin.prevout.as_ref())
@@ -113,7 +143,10 @@ impl MempoolInner {
false
});
new_txs.iter().for_each(|(txid, tx)| {
new_txs.iter().for_each(|(txid, tx_with_hex)| {
let tx = tx_with_hex.tx();
info.add(tx);
tx.input
.iter()
.flat_map(|txin| txin.prevout.as_ref())

View File

@@ -0,0 +1,59 @@
use brk_types::{FeeRate, Outpoint, Sats, Transaction, Txid, VSize};
use rustc_hash::FxHashSet;
/// A mempool transaction with its dependency metadata
#[derive(Debug, Clone)]
pub struct MempoolEntry {
    /// Transaction id of this entry
    pub txid: Txid,
    /// Fee paid by this transaction alone (excluding ancestors)
    pub fee: Sats,
    /// Virtual size of this transaction alone (excluding ancestors)
    pub vsize: VSize,
    /// Outpoints this tx spends (inputs)
    pub spends: Vec<Outpoint>,
    /// Txids of unconfirmed ancestors (parents, grandparents, etc.)
    pub ancestors: FxHashSet<Txid>,
    /// Cumulative fee of this tx + all ancestors
    pub ancestor_fee: Sats,
    /// Cumulative vsize of this tx + all ancestors
    pub ancestor_vsize: VSize,
}
impl MempoolEntry {
    /// Build an entry for `tx` with no ancestor links yet.
    ///
    /// The ancestor set starts empty and the cumulative totals start equal
    /// to the transaction's own fee and vsize; the graph that owns this
    /// entry is responsible for filling them in afterwards.
    pub fn new(tx: &Transaction) -> Self {
        let fee = tx.fee;
        let vsize = tx.vsize();

        // Collect the outpoints consumed by each input.
        let mut spends = Vec::with_capacity(tx.input.len());
        for txin in tx.input.iter() {
            spends.push(Outpoint::new(txin.txid.clone(), txin.vout));
        }

        Self {
            txid: tx.txid.clone(),
            fee,
            vsize,
            spends,
            ancestors: FxHashSet::default(),
            ancestor_fee: fee,
            ancestor_vsize: vsize,
        }
    }

    /// Individual fee rate (without ancestors)
    #[inline]
    pub fn fee_rate(&self) -> FeeRate {
        (self.fee, self.vsize).into()
    }

    /// Ancestor fee rate (fee + ancestors_fee) / (vsize + ancestors_vsize)
    /// This is the effective mining priority
    #[inline]
    pub fn ancestor_fee_rate(&self) -> FeeRate {
        (self.ancestor_fee, self.ancestor_vsize).into()
    }
}

View File

@@ -0,0 +1,174 @@
use brk_types::{Outpoint, Sats, Transaction, Txid, VSize};
use rustc_hash::{FxHashMap, FxHashSet};
use super::MempoolEntry;
/// Transaction dependency graph for the mempool
///
/// Tracks parent-child relationships and computes ancestor feerates
/// for proper CPFP (Child-Pays-For-Parent) handling.
#[derive(Debug, Default)]
pub struct TxGraph {
    /// All mempool entries by txid
    entries: FxHashMap<Txid, MempoolEntry>,
    /// Maps outpoint -> txid that created it (for finding parents)
    outpoint_to_tx: FxHashMap<Outpoint, Txid>,
    /// Maps txid -> txids that spend its outputs (children)
    children: FxHashMap<Txid, FxHashSet<Txid>>,
}
impl TxGraph {
    /// Create an empty graph.
    pub fn new() -> Self {
        Self::default()
    }

    /// Read-only access to all entries, keyed by txid.
    pub fn entries(&self) -> &FxHashMap<Txid, MempoolEntry> {
        &self.entries
    }

    /// Number of transactions currently tracked.
    pub fn len(&self) -> usize {
        self.entries.len()
    }

    /// Whether the graph holds no transactions.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Add a transaction to the graph
    ///
    /// Builds the entry's ancestor set from in-mempool parents and registers
    /// this tx's outputs and child links.
    ///
    /// NOTE(review): ancestor totals are taken from the parents' entries at
    /// insertion time, so this presumably assumes parents are inserted
    /// before their children — confirm against the caller's feed order.
    pub fn insert(&mut self, tx: &Transaction) {
        let mut entry = MempoolEntry::new(tx);

        // Find in-mempool parents and build ancestor set
        let parents = self.find_parents(&entry.spends);
        entry.ancestors = self.compute_ancestors(&parents);

        // Compute ancestor fee/vsize
        let (ancestor_fee, ancestor_vsize) = self.sum_ancestors(&entry.ancestors);
        entry.ancestor_fee = entry.fee + ancestor_fee;
        entry.ancestor_vsize = entry.vsize + ancestor_vsize;

        // Register this tx's outputs
        for (vout, _) in tx.output.iter().enumerate() {
            let outpoint = Outpoint::new(entry.txid.clone(), vout as u32);
            self.outpoint_to_tx.insert(outpoint, entry.txid.clone());
        }

        // Register as child of parents
        for parent in &parents {
            self.children
                .entry(parent.clone())
                .or_default()
                .insert(entry.txid.clone());
        }

        self.entries.insert(entry.txid.clone(), entry);
    }

    /// Remove a transaction from the graph
    ///
    /// Returns the removed entry, or `None` if the txid was unknown.
    /// Descendants keep their ancestor sets/totals consistent via
    /// `update_descendants_after_removal`.
    pub fn remove(&mut self, txid: &Txid) -> Option<MempoolEntry> {
        let entry = self.entries.remove(txid)?;

        // Remove from outpoint index
        // Note: We don't know the vout count, so we remove all entries pointing to this txid
        // NOTE(review): this retain scans every tracked outpoint, so removal is
        // O(total outpoints); fine for occasional evictions, worth revisiting
        // if removals are hot.
        self.outpoint_to_tx.retain(|_, tx| tx != txid);

        // Remove from children index
        self.children.remove(txid);
        for children_set in self.children.values_mut() {
            children_set.remove(txid);
        }

        // Update descendants' ancestor data
        self.update_descendants_after_removal(txid, &entry);

        Some(entry)
    }

    /// Check if a txid is in the mempool
    pub fn contains(&self, txid: &Txid) -> bool {
        self.entries.contains_key(txid)
    }

    /// Get all txids currently in the graph
    pub fn txids(&self) -> impl Iterator<Item = &Txid> {
        self.entries.keys()
    }

    /// Find which inputs reference in-mempool transactions (parents)
    fn find_parents(&self, spends: &[Outpoint]) -> Vec<Txid> {
        spends
            .iter()
            .filter_map(|outpoint| self.outpoint_to_tx.get(outpoint).cloned())
            .collect()
    }

    /// Compute full ancestor set (transitive closure)
    ///
    /// Depth-first walk over the stored per-entry ancestor sets; the
    /// `insert` check on the visited set prevents revisiting shared ancestors.
    fn compute_ancestors(&self, parents: &[Txid]) -> FxHashSet<Txid> {
        let mut ancestors = FxHashSet::default();
        let mut stack: Vec<Txid> = parents.to_vec();

        while let Some(txid) = stack.pop() {
            if ancestors.insert(txid.clone()) {
                if let Some(entry) = self.entries.get(&txid) {
                    stack.extend(entry.ancestors.iter().cloned());
                }
            }
        }

        ancestors
    }

    /// Sum fee and vsize of all ancestors
    ///
    /// Ancestors missing from `entries` (e.g. already removed) contribute
    /// nothing rather than erroring.
    fn sum_ancestors(&self, ancestors: &FxHashSet<Txid>) -> (Sats, VSize) {
        ancestors.iter().fold(
            (Sats::default(), VSize::default()),
            |(fee, vsize), txid| {
                if let Some(entry) = self.entries.get(txid) {
                    (fee + entry.fee, vsize + entry.vsize)
                } else {
                    (fee, vsize)
                }
            },
        )
    }

    /// Update all descendants after removing a transaction
    ///
    /// Every transitive descendant had `removed` in its ancestor set, so each
    /// one drops the txid and subtracts the removed entry's fee/vsize from
    /// its cumulative totals.
    fn update_descendants_after_removal(&mut self, removed: &Txid, removed_entry: &MempoolEntry) {
        // Find all descendants
        let descendants = self.find_descendants(removed);

        // Update each descendant's ancestor set and cumulative values
        for desc_txid in descendants {
            if let Some(desc) = self.entries.get_mut(&desc_txid) {
                // Remove the removed tx from ancestors
                desc.ancestors.remove(removed);

                // Subtract the removed tx's contribution
                desc.ancestor_fee = desc.ancestor_fee - removed_entry.fee;
                desc.ancestor_vsize = desc.ancestor_vsize - removed_entry.vsize;
            }
        }
    }

    /// Find all descendants of a transaction (children, grandchildren, etc.)
    ///
    /// Iterative DFS over the `children` index; `visited` guards against
    /// counting a descendant reachable through multiple paths twice.
    fn find_descendants(&self, txid: &Txid) -> Vec<Txid> {
        let mut descendants = Vec::new();
        let mut stack = vec![txid.clone()];
        let mut visited = FxHashSet::default();

        while let Some(current) = stack.pop() {
            if let Some(children) = self.children.get(&current) {
                for child in children {
                    if visited.insert(child.clone()) {
                        descendants.push(child.clone());
                        stack.push(child.clone());
                    }
                }
            }
        }

        descendants
    }
}

View File

@@ -0,0 +1,7 @@
mod entry;
mod graph;
mod projected_blocks;
pub use entry::MempoolEntry;
pub use graph::TxGraph;
pub use projected_blocks::ProjectedBlocks;

View File

@@ -0,0 +1,129 @@
use brk_types::{FeeRate, RecommendedFees, Sats, Txid, VSize};
use rustc_hash::FxHashSet;
use super::TxGraph;
/// Maximum block weight in weight units (4 million)
const MAX_BLOCK_WEIGHT: u64 = 4_000_000;
/// Target block vsize (weight / 4)
const BLOCK_VSIZE_TARGET: u64 = MAX_BLOCK_WEIGHT / 4;
/// Number of projected blocks to build
const NUM_PROJECTED_BLOCKS: usize = 8;
/// A projected future block built from mempool transactions
#[derive(Debug, Clone, Default)]
pub struct ProjectedBlock {
    /// Transactions selected for this block, highest ancestor fee rate first
    pub txids: Vec<Txid>,
    /// Sum of the vsizes of the selected transactions
    pub total_vsize: VSize,
    /// Sum of the fees of the selected transactions
    pub total_fee: Sats,
    /// Lowest ancestor fee rate admitted into this block
    pub min_fee_rate: FeeRate,
    /// Highest ancestor fee rate admitted into this block
    pub max_fee_rate: FeeRate,
    /// Midpoint of min/max fee rate — an approximation, not a true median
    pub median_fee_rate: FeeRate,
}
/// Projected mempool blocks for fee estimation
#[derive(Debug, Clone, Default)]
pub struct ProjectedBlocks {
    /// Up to NUM_PROJECTED_BLOCKS blocks, ordered by expected confirmation
    pub blocks: Vec<ProjectedBlock>,
}
impl ProjectedBlocks {
    /// Build projected blocks from a transaction graph
    ///
    /// Simulates how miners would construct blocks by selecting
    /// transactions with highest ancestor fee rates first.
    ///
    /// At most `NUM_PROJECTED_BLOCKS` blocks are produced; mempool
    /// overflow beyond that is dropped.
    pub fn build(graph: &TxGraph) -> Self {
        if graph.is_empty() {
            return Self::default();
        }
        // Collect entries sorted by ancestor fee rate (descending)
        let mut sorted: Vec<_> = graph
            .entries()
            .iter()
            .map(|(txid, entry)| (txid.clone(), entry.ancestor_fee_rate(), entry.vsize, entry.fee))
            .collect();
        sorted.sort_by(|a, b| b.1.cmp(&a.1));
        // Build blocks greedily
        let mut blocks = Vec::with_capacity(NUM_PROJECTED_BLOCKS);
        let mut current_block = ProjectedBlock::default();
        let mut included: FxHashSet<Txid> = FxHashSet::default();
        for (txid, fee_rate, vsize, fee) in sorted {
            // Skip if already included (as part of ancestor package)
            if included.contains(&txid) {
                continue;
            }
            // Would this tx fit in the current block?
            let new_vsize = current_block.total_vsize + vsize;
            if u64::from(new_vsize) > BLOCK_VSIZE_TARGET {
                // Finalize current block if it has transactions
                if !current_block.txids.is_empty() {
                    Self::finalize_block(&mut current_block);
                    blocks.push(current_block);
                    if blocks.len() >= NUM_PROJECTED_BLOCKS {
                        break;
                    }
                }
                // Start new block
                // NOTE(review): a single tx whose vsize exceeds
                // BLOCK_VSIZE_TARGET still lands in the fresh block below,
                // making that block oversized — confirm this is intended.
                current_block = ProjectedBlock::default();
            }
            // Add to current block
            current_block.txids.push(txid.clone());
            current_block.total_vsize += vsize;
            current_block.total_fee += fee;
            included.insert(txid);
            // Track fee rate bounds: iteration is in descending fee-rate
            // order, so the first tx added fixes the max and every later
            // tx lowers the min.
            if current_block.max_fee_rate == FeeRate::default() {
                current_block.max_fee_rate = fee_rate;
            }
            current_block.min_fee_rate = fee_rate;
        }
        // Don't forget the last block
        if !current_block.txids.is_empty() && blocks.len() < NUM_PROJECTED_BLOCKS {
            Self::finalize_block(&mut current_block);
            blocks.push(current_block);
        }
        Self { blocks }
    }
    /// Compute recommended fees from projected blocks
    ///
    /// Each tier reads the minimum fee rate of a projected block: deeper
    /// block indices correspond to slower confirmation targets.
    pub fn recommended_fees(&self) -> RecommendedFees {
        RecommendedFees {
            fastest_fee: self.fee_for_block(0),
            half_hour_fee: self.fee_for_block(2), // ~3 blocks
            hour_fee: self.fee_for_block(5), // ~6 blocks
            economy_fee: self.fee_for_block(7), // ~12 blocks, but we only have 8
            minimum_fee: 1.0,
        }
    }
    /// Get the minimum fee rate needed to get into block N
    ///
    /// Falls back to 1 sat/vB when block N doesn't exist (small or empty
    /// mempool) and never recommends below that floor.
    fn fee_for_block(&self, block_index: usize) -> f64 {
        self.blocks
            .get(block_index)
            .map(|b| f64::from(b.min_fee_rate))
            .unwrap_or(1.0)
            .max(1.0) // Never recommend below 1 sat/vB
    }
    /// Set the block's median fee rate once its membership is final.
    fn finalize_block(block: &mut ProjectedBlock) {
        // Compute median fee rate from min/max as approximation
        // (true median would require storing all fee rates)
        let min = f64::from(block.min_fee_rate);
        let max = f64::from(block.max_fee_rate);
        block.median_fee_rate = FeeRate::from((min + max) / 2.0);
    }
}

View File

@@ -6,13 +6,13 @@ use brk_indexer::Indexer;
use brk_monitor::Mempool;
use brk_reader::Reader;
use brk_types::{
Address, AddressStats, Height, Index, IndexInfo, Limit, Metric, MetricCount, Transaction,
TreeNode, TxidPath,
Address, AddressStats, BlockInfo, BlockStatus, Height, Index, IndexInfo, Limit, MempoolInfo,
Metric, MetricCount, Transaction, TreeNode, TxStatus, Txid, TxidPath, Utxo,
};
use tokio::task::spawn_blocking;
use crate::{
Output, PaginatedIndexParam, PaginatedMetrics, PaginationParam, Params, ParamsOpt, Query,
Output, PaginatedIndexParam, PaginatedMetrics, PaginationParam, Params, Query,
vecs::{IndexToVec, MetricToVec, Vecs},
};
@@ -42,11 +42,71 @@ impl AsyncQuery {
spawn_blocking(move || query.get_address(address)).await?
}
/// Transaction IDs for an address, newest first; `after_txid` paginates.
pub async fn get_address_txids(
    &self,
    address: Address,
    after_txid: Option<Txid>,
    limit: usize,
) -> Result<Vec<Txid>> {
    // Each wrapper below clones the inner query handle and offloads the
    // synchronous call onto tokio's blocking pool so the async executor
    // is never stalled by disk-bound work.
    let query = self.0.clone();
    spawn_blocking(move || query.get_address_txids(address, after_txid, limit)).await?
}
/// Unspent outputs for an address.
pub async fn get_address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_address_utxos(address)).await?
}
/// Full decoded transaction by txid.
pub async fn get_transaction(&self, txid: TxidPath) -> Result<Transaction> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_transaction(txid)).await?
}
/// Confirmation status of a transaction.
pub async fn get_transaction_status(&self, txid: TxidPath) -> Result<TxStatus> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_transaction_status(txid)).await?
}
/// Raw transaction hex by txid.
pub async fn get_transaction_hex(&self, txid: TxidPath) -> Result<String> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_transaction_hex(txid)).await?
}
/// Block info by block hash.
pub async fn get_block(&self, hash: String) -> Result<BlockInfo> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block(&hash)).await?
}
/// Block info by height.
pub async fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_by_height(height)).await?
}
/// Best-chain status of a block by hash.
pub async fn get_block_status(&self, hash: String) -> Result<BlockStatus> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_status(&hash)).await?
}
/// Recent blocks, optionally starting from a given height.
pub async fn get_blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_blocks(start_height)).await?
}
/// All txids of a block by hash.
pub async fn get_block_txids(&self, hash: String) -> Result<Vec<Txid>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_txids(&hash)).await?
}
/// Mempool summary statistics.
pub async fn get_mempool_info(&self) -> Result<MempoolInfo> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_mempool_info()).await?
}
/// All txids currently in the mempool.
pub async fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_mempool_txids()).await?
}
pub async fn match_metric(&self, metric: Metric, limit: Limit) -> Result<Vec<&'static str>> {
let query = self.0.clone();
spawn_blocking(move || Ok(query.match_metric(&metric, limit))).await?

View File

@@ -0,0 +1,8 @@
mod addr;
mod resolve;
mod txids;
mod utxos;
pub use addr::*;
pub use txids::*;
pub use utxos::*;

View File

@@ -0,0 +1,27 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{Address, AddressBytes, AddressHash, OutputType, TypeIndex};
use crate::Query;
/// Resolve an address string to its output type and type_index
///
/// Parses the address into its script bytes, derives the output type and
/// address hash from them, then looks the hash up in the per-output-type
/// store. Both a store error and a missing entry surface as
/// `Error::UnknownAddress`.
pub fn resolve_address(address: &Address, query: &Query) -> Result<(OutputType, TypeIndex)> {
    let stores = &query.indexer().stores;
    let bytes = AddressBytes::from_str(&address.address)?;
    let outputtype = OutputType::from(&bytes);
    let hash = AddressHash::from(&bytes);
    // NOTE(review): `.unwrap()` assumes every OutputType derived from a
    // parseable address has a backing store — confirm that invariant.
    let Ok(Some(type_index)) = stores
        .addresstype_to_addresshash_to_addressindex
        .get(outputtype)
        .unwrap()
        .get(&hash)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownAddress);
    };
    Ok((outputtype, type_index))
}

View File

@@ -0,0 +1,60 @@
use brk_error::{Error, Result};
use brk_types::{Address, AddressIndexTxIndex, TxIndex, Txid, Unit};
use vecdb::TypedVecIterator;
use super::resolve::resolve_address;
use crate::Query;
/// Get transaction IDs for an address, newest first
///
/// Scans the address/txindex store with the address's `type_index` as a
/// big-endian key prefix and reverses the iteration for newest-first
/// ordering (assumes keys sort ascending by txindex within a prefix —
/// confirm against the store's key layout). When `after_txid` is given,
/// only strictly older transactions are returned, enabling cursor-style
/// pagination.
pub fn get_address_txids(
    address: Address,
    after_txid: Option<Txid>,
    limit: usize,
    query: &Query,
) -> Result<Vec<Txid>> {
    let indexer = query.indexer();
    let stores = &indexer.stores;
    let (outputtype, type_index) = resolve_address(&address, query)?;
    let store = stores
        .addresstype_to_addressindex_and_txindex
        .get(outputtype)
        .unwrap();
    // Big-endian so lexicographic key order matches numeric order.
    let prefix = u32::from(type_index).to_be_bytes();
    // Translate the pagination cursor (txid) into a txindex up front,
    // erroring on an unknown cursor instead of silently serving page one.
    let after_txindex = if let Some(after_txid) = after_txid {
        let txindex = stores
            .txidprefix_to_txindex
            .get(&after_txid.into())
            .map_err(|_| Error::Str("Failed to look up after_txid"))?
            .ok_or(Error::Str("after_txid not found"))?
            .into_owned();
        Some(txindex)
    } else {
        None
    };
    let txindices: Vec<TxIndex> = store
        .prefix(prefix)
        .rev()
        .filter(|(key, _): &(AddressIndexTxIndex, Unit)| {
            if let Some(after) = after_txindex {
                TxIndex::from(key.txindex()) < after
            } else {
                true
            }
        })
        .take(limit)
        .map(|(key, _)| TxIndex::from(key.txindex()))
        .collect();
    // Resolve the collected txindices back to txids in a single pass.
    let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
    let txids: Vec<Txid> = txindices
        .into_iter()
        .map(|txindex| txindex_to_txid_iter.get_unwrap(txindex))
        .collect();
    Ok(txids)
}

View File

@@ -0,0 +1,65 @@
use brk_error::Result;
use brk_types::{
Address, AddressIndexOutPoint, Sats, TxIndex, TxStatus, Txid, Unit, Utxo, Vout,
};
use vecdb::TypedVecIterator;
use super::resolve::resolve_address;
use crate::Query;
/// Get UTXOs for an address
///
/// Scans the per-address unspent-outpoint store for the address's
/// `type_index` prefix, then enriches each (txindex, vout) pair with its
/// txid, value and confirmation metadata via sequential vec iterators.
pub fn get_address_utxos(address: Address, query: &Query) -> Result<Vec<Utxo>> {
    let indexer = query.indexer();
    let stores = &indexer.stores;
    let vecs = &indexer.vecs;
    let (outputtype, type_index) = resolve_address(&address, query)?;
    let store = stores
        .addresstype_to_addressindex_and_unspentoutpoint
        .get(outputtype)
        .unwrap();
    let prefix = u32::from(type_index).to_be_bytes();
    // Collect outpoints (txindex, vout)
    let outpoints: Vec<(TxIndex, Vout)> = store
        .prefix(prefix)
        .map(|(key, _): (AddressIndexOutPoint, Unit)| (key.txindex(), key.vout()))
        .collect();
    // Create iterators for looking up tx data
    let mut txindex_to_txid_iter = vecs.tx.txindex_to_txid.iter()?;
    let mut txindex_to_height_iter = vecs.tx.txindex_to_height.iter()?;
    let mut txindex_to_first_txoutindex_iter = vecs.tx.txindex_to_first_txoutindex.iter()?;
    let mut txoutindex_to_value_iter = vecs.txout.txoutindex_to_value.iter()?;
    let mut height_to_blockhash_iter = vecs.block.height_to_blockhash.iter()?;
    let mut height_to_timestamp_iter = vecs.block.height_to_timestamp.iter()?;
    let utxos: Vec<Utxo> = outpoints
        .into_iter()
        .map(|(txindex, vout)| {
            let txid: Txid = txindex_to_txid_iter.get_unwrap(txindex);
            let height = txindex_to_height_iter.get_unwrap(txindex);
            let first_txoutindex = txindex_to_first_txoutindex_iter.get_unwrap(txindex);
            // A tx's outputs are indexed contiguously, so output `vout`
            // lives at first_txoutindex + vout.
            let txoutindex = first_txoutindex + vout;
            let value: Sats = txoutindex_to_value_iter.get_unwrap(txoutindex);
            let block_hash = height_to_blockhash_iter.get_unwrap(height);
            let block_time = height_to_timestamp_iter.get_unwrap(height);
            // Assumes every entry in this store is confirmed (mempool
            // outputs tracked elsewhere) — hence confirmed: true. Confirm.
            Utxo {
                txid,
                vout,
                status: TxStatus {
                    confirmed: true,
                    block_height: Some(height),
                    block_hash: Some(block_hash),
                    block_time: Some(block_time),
                },
                value,
            }
        })
        .collect();
    Ok(utxos)
}

View File

@@ -0,0 +1,19 @@
use brk_error::{Error, Result};
use brk_types::{BlockHash, BlockHashPrefix, Height};
use crate::Query;
/// Resolve a block hash to height
pub fn get_height_by_hash(hash: &str, query: &Query) -> Result<Height> {
let indexer = query.indexer();
let blockhash: BlockHash = hash.parse().map_err(|_| Error::Str("Invalid block hash"))?;
let prefix = BlockHashPrefix::from(&blockhash);
indexer
.stores
.blockhashprefix_to_height
.get(&prefix)?
.map(|h| *h)
.ok_or(Error::Str("Block not found"))
}

View File

@@ -0,0 +1,62 @@
use brk_error::{Error, Result};
use brk_types::{BlockInfo, Height, TxIndex};
use vecdb::{AnyVec, GenericStoredVec, VecIndex};
use crate::Query;
/// Get block info by height
///
/// Reads hash, difficulty, timestamp, size and weight from the per-height
/// vecs and derives the transaction count from the block's txindex range.
/// Errors when `height` is beyond the indexed tip.
pub fn get_block_by_height(height: Height, query: &Query) -> Result<BlockInfo> {
    let indexer = query.indexer();
    let max_height = max_height(query);
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let blockhash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let difficulty = indexer.vecs.block.height_to_difficulty.read_once(height)?;
    let timestamp = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
    let weight = indexer.vecs.block.height_to_weight.read_once(height)?;
    let tx_count = tx_count_at_height(height, max_height, query)?;
    Ok(BlockInfo {
        id: blockhash,
        height,
        tx_count,
        size: *size,
        weight,
        timestamp,
        difficulty: *difficulty,
    })
}
/// Highest indexed block height: length of the blockhash vec minus one
/// (saturating, so an empty index yields height 0).
fn max_height(query: &Query) -> Height {
    let indexed_blocks = query.indexer().vecs.block.height_to_blockhash.len();
    Height::from(indexed_blocks.saturating_sub(1))
}
/// Number of transactions in the block at `height`
///
/// Computed as the difference between this block's first txindex and the
/// next block's; for the tip block the end bound falls back to the total
/// transaction count.
fn tx_count_at_height(height: Height, max_height: Height, query: &Query) -> Result<u32> {
    let indexer = query.indexer();
    let computer = query.computer();
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    let next_first_txindex = if height < max_height {
        indexer
            .vecs
            .tx
            .height_to_first_txindex
            .read_once(height.incremented())?
    } else {
        // NOTE(review): assumes txindex_to_txindex length equals the total
        // number of indexed transactions — confirm against the computer's
        // index layout.
        TxIndex::from(computer.indexes.txindex_to_txindex.len())
    };
    Ok((next_first_txindex.to_usize() - first_txindex.to_usize()) as u32)
}

View File

@@ -0,0 +1,27 @@
use brk_error::Result;
use brk_types::{BlockInfo, Height};
use crate::Query;
use super::info::get_block_by_height;
const DEFAULT_BLOCK_COUNT: u32 = 10;
/// Get a list of blocks, optionally starting from a specific height
///
/// Walks downward from `start_height` (clamped to the current tip, which
/// is also the default) and returns up to `DEFAULT_BLOCK_COUNT` blocks,
/// newest first. Near genesis fewer blocks are returned.
pub fn get_blocks(start_height: Option<Height>, query: &Query) -> Result<Vec<BlockInfo>> {
    let tip = query.get_height();
    let start_u32: u32 = start_height.unwrap_or(tip).min(tip).into();
    // Don't walk below height 0.
    let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1);
    (0..count)
        .map(|offset| get_block_by_height(Height::from(start_u32 - offset), query))
        .collect()
}

View File

@@ -0,0 +1,11 @@
mod height_by_hash;
mod info;
mod list;
mod status;
mod txids;
pub use height_by_hash::*;
pub use info::*;
pub use list::*;
pub use status::*;
pub use txids::*;

View File

@@ -0,0 +1,37 @@
use brk_error::Result;
use brk_types::{BlockStatus, Height};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
/// Get block status by height
///
/// Heights beyond the indexed tip report "not in best chain". For indexed
/// heights the status carries the hash of the following block when one
/// exists (i.e. for every block except the tip).
pub fn get_block_status_by_height(height: Height, query: &Query) -> Result<BlockStatus> {
    let indexer = query.indexer();
    let blockhashes = &indexer.vecs.block.height_to_blockhash;
    let max_height = Height::from(blockhashes.len().saturating_sub(1));
    if height > max_height {
        return Ok(BlockStatus::not_in_best_chain());
    }
    // Only non-tip blocks have a successor hash.
    let next_best = (height < max_height)
        .then(|| blockhashes.read_once(height.incremented()))
        .transpose()?;
    Ok(BlockStatus::in_best_chain(height, next_best))
}

View File

@@ -0,0 +1,38 @@
use brk_error::{Error, Result};
use brk_types::{Height, TxIndex, Txid};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
/// Get all txids in a block by height
///
/// Resolves the block's transaction range [first, next_first) from the
/// per-height first-txindex vec and reads the corresponding txids. The
/// tip block has no following block, so its end bound falls back to the
/// total transaction count.
///
/// Errors when `height` is beyond the current tip.
pub fn get_block_txids(height: Height, query: &Query) -> Result<Vec<Txid>> {
    let indexer = query.indexer();
    let max_height = query.get_height();
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    let next_first_txindex = indexer
        .vecs
        .tx
        .height_to_first_txindex
        .read_once(height.incremented())
        .unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
    let first: usize = first_txindex.into();
    let next: usize = next_first_txindex.into();
    // Guard against inconsistent index data: `next - first` would panic
    // on underflow in debug builds (and wrap in release); saturating_sub
    // degrades to an empty result instead.
    let count = next.saturating_sub(first);
    let txids: Vec<Txid> = indexer
        .vecs
        .tx
        .txindex_to_txid
        .iter()?
        .skip(first)
        .take(count)
        .collect();
    Ok(txids)
}

View File

@@ -0,0 +1,10 @@
use brk_error::{Error, Result};
use brk_types::MempoolInfo;
use crate::Query;
/// Get mempool statistics
pub fn get_mempool_info(query: &Query) -> Result<MempoolInfo> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
Ok(mempool.get_info())
}

View File

@@ -0,0 +1,5 @@
mod info;
mod txids;
pub use info::*;
pub use txids::*;

View File

@@ -0,0 +1,11 @@
use brk_error::{Error, Result};
use brk_types::Txid;
use crate::Query;
/// Get all mempool transaction IDs
pub fn get_mempool_txids(query: &Query) -> Result<Vec<Txid>> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
let txs = mempool.get_txs();
Ok(txs.keys().cloned().collect())
}

View File

@@ -1,5 +1,9 @@
mod addresses;
mod transactions;
mod addr;
mod block;
mod mempool;
mod tx;
pub use addresses::*;
pub use transactions::*;
pub use addr::*;
pub use block::*;
pub use mempool::*;
pub use tx::*;

View File

@@ -0,0 +1,50 @@
use std::str::FromStr;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_types::{TxIndex, Txid, TxidPath, TxidPrefix};
use vecdb::GenericStoredVec;
use crate::Query;
/// Raw transaction hex by txid
///
/// Unconfirmed transactions are served straight from the mempool cache;
/// confirmed ones are located via the txid-prefix store and read back
/// from the blk files.
pub fn get_transaction_hex(TxidPath { txid }: TxidPath, query: &Query) -> Result<String> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // First check mempool for unconfirmed transactions
    if let Some(mempool) = query.mempool()
        && let Some(tx_with_hex) = mempool.get_txs().get(&txid)
    {
        return Ok(tx_with_hex.hex().to_string());
    }
    // Look up confirmed transaction by txid prefix
    let prefix = TxidPrefix::from(&txid);
    let indexer = query.indexer();
    // Both a store error and a missing entry surface as UnknownTxid.
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    get_transaction_hex_by_index(txindex, query)
}
/// Hex-encode the raw bytes of a confirmed transaction.
///
/// Looks up the transaction's size and on-disk position, reads the raw
/// (XOR-decoded) bytes from the blk file, and returns them as lowercase hex.
pub fn get_transaction_hex_by_index(txindex: TxIndex, query: &Query) -> Result<String> {
    let indexer = query.indexer();
    let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
    let position = query.computer().blks.txindex_to_position.read_once(txindex)?;
    let raw = query.reader().read_raw_bytes(position, *total_size as usize)?;
    Ok(raw.to_lower_hex_string())
}

View File

@@ -0,0 +1,7 @@
mod hex;
mod status;
mod tx;
pub use hex::*;
pub use status::*;
pub use tx::*;

View File

@@ -0,0 +1,46 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{TxStatus, Txid, TxidPath, TxidPrefix};
use vecdb::GenericStoredVec;
use crate::Query;
/// Confirmation status of a transaction by txid
///
/// Mempool membership short-circuits to UNCONFIRMED; otherwise the txid
/// is resolved via the prefix store and the confirming block's height,
/// hash and timestamp are returned.
pub fn get_transaction_status(TxidPath { txid }: TxidPath, query: &Query) -> Result<TxStatus> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // First check mempool for unconfirmed transactions
    if let Some(mempool) = query.mempool()
        && mempool.get_txs().contains_key(&txid)
    {
        return Ok(TxStatus::UNCONFIRMED);
    }
    // Look up confirmed transaction by txid prefix
    let prefix = TxidPrefix::from(&txid);
    let indexer = query.indexer();
    // Both a store error and a missing entry surface as UnknownTxid.
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    // Get block info for status
    let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
    let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    Ok(TxStatus {
        confirmed: true,
        block_height: Some(height),
        block_hash: Some(block_hash),
        block_time: Some(block_time),
    })
}

View File

@@ -1,12 +1,7 @@
use std::{
fs::File,
io::{Cursor, Read, Seek, SeekFrom},
str::FromStr,
};
use std::{io::Cursor, str::FromStr};
use bitcoin::consensus::Decodable;
use brk_error::{Error, Result};
use brk_reader::XORIndex;
use brk_types::{
Sats, Transaction, TxIn, TxIndex, TxOut, TxStatus, Txid, TxidPath, TxidPrefix, Vout, Weight,
};
@@ -20,6 +15,15 @@ pub fn get_transaction(TxidPath { txid }: TxidPath, query: &Query) -> Result<Tra
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = query.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.tx().clone());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = query.indexer();
let Ok(Some(txindex)) = indexer
@@ -45,7 +49,11 @@ pub fn get_transaction_by_index(txindex: TxIndex, query: &Query) -> Result<Trans
let version = indexer.vecs.tx.txindex_to_txversion.read_once(txindex)?;
let lock_time = indexer.vecs.tx.txindex_to_rawlocktime.read_once(txindex)?;
let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
let first_txinindex = indexer.vecs.tx.txindex_to_first_txinindex.read_once(txindex)?;
let first_txinindex = indexer
.vecs
.tx
.txindex_to_first_txinindex
.read_once(txindex)?;
let position = computer.blks.txindex_to_position.read_once(txindex)?;
// Get block info for status
@@ -53,39 +61,15 @@ pub fn get_transaction_by_index(txindex: TxIndex, query: &Query) -> Result<Trans
let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
// Read and decode the raw transaction from blk file
let blk_index_to_blk_path = reader.blk_index_to_blk_path();
let Some(blk_path) = blk_index_to_blk_path.get(&position.blk_index()) else {
return Err(Error::Str("Failed to get the correct blk file"));
};
let mut xori = XORIndex::default();
xori.add_assign(position.offset() as usize);
let Ok(mut file) = File::open(blk_path) else {
return Err(Error::Str("Failed to open blk file"));
};
if file
.seek(SeekFrom::Start(position.offset() as u64))
.is_err()
{
return Err(Error::Str("Failed to seek position in file"));
}
let mut buffer = vec![0u8; *total_size as usize];
if file.read_exact(&mut buffer).is_err() {
return Err(Error::Str("Failed to read the transaction (read exact)"));
}
xori.bytes(&mut buffer, reader.xor_bytes());
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
let mut cursor = Cursor::new(buffer);
let Ok(tx) = bitcoin::Transaction::consensus_decode(&mut cursor) else {
return Err(Error::Str("Failed decode the transaction"));
};
let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
.map_err(|_| Error::Str("Failed to decode transaction"))?;
// For iterating through inputs, we need iterators (multiple lookups)
let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
let mut txindex_to_first_txoutindex_iter = indexer.vecs.tx.txindex_to_first_txoutindex.iter()?;
let mut txindex_to_first_txoutindex_iter =
indexer.vecs.tx.txindex_to_first_txoutindex.iter()?;
let mut txinindex_to_outpoint_iter = indexer.vecs.txin.txinindex_to_outpoint.iter()?;
let mut txoutindex_to_value_iter = indexer.vecs.txout.txoutindex_to_value.iter()?;
@@ -144,6 +128,12 @@ pub fn get_transaction_by_index(txindex: TxIndex, query: &Query) -> Result<Trans
// Calculate weight before consuming tx.output
let weight = Weight::from(tx.weight());
// Calculate sigop cost
// Note: Using |_| None means P2SH and SegWit sigops won't be counted accurately
// since we don't provide the prevout scripts. This matches mempool tx behavior.
// For accurate counting, we'd need to reconstruct prevout scripts from indexed data.
let total_sigop_cost = tx.total_sigop_cost(|_| None);
// Build outputs
let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
@@ -162,8 +152,8 @@ pub fn get_transaction_by_index(txindex: TxIndex, query: &Query) -> Result<Trans
lock_time,
total_size: *total_size as usize,
weight,
total_sigop_cost: 0, // Would need to calculate from scripts
fee: Sats::ZERO, // Will be computed below
total_sigop_cost,
fee: Sats::ZERO, // Will be computed below
input,
output,
status,

View File

@@ -1,4 +1,5 @@
#![doc = include_str!("../README.md")]
#![allow(clippy::module_inception)]
use std::{collections::BTreeMap, sync::Arc};
@@ -9,8 +10,8 @@ use brk_monitor::Mempool;
use brk_reader::Reader;
use brk_traversable::TreeNode;
use brk_types::{
Address, AddressStats, Format, Height, Index, IndexInfo, Limit, Metric, MetricCount,
Transaction, TxidPath,
Address, AddressStats, BlockInfo, BlockStatus, Format, Height, Index, IndexInfo, Limit,
MempoolInfo, Metric, MetricCount, Transaction, TxStatus, Txid, TxidPath, Utxo,
};
use vecdb::{AnyExportableVec, AnyStoredVec};
@@ -31,7 +32,12 @@ pub use params::{Params, ParamsDeprec, ParamsOpt};
use vecs::Vecs;
use crate::{
chain::{get_address, get_transaction},
chain::{
get_address, get_address_txids, get_address_utxos, get_block_by_height,
get_block_status_by_height, get_block_txids, get_blocks, get_height_by_hash,
get_mempool_info, get_mempool_txids, get_transaction, get_transaction_hex,
get_transaction_status,
},
vecs::{IndexToVec, MetricToVec},
};
@@ -74,10 +80,62 @@ impl Query {
get_address(address, self)
}
/// Transaction IDs for an address, newest first; `after_txid` paginates.
pub fn get_address_txids(
    &self,
    address: Address,
    after_txid: Option<Txid>,
    limit: usize,
) -> Result<Vec<Txid>> {
    get_address_txids(address, after_txid, limit, self)
}
/// Unspent outputs for an address.
pub fn get_address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
    get_address_utxos(address, self)
}
/// Full decoded transaction by txid.
pub fn get_transaction(&self, txid: TxidPath) -> Result<Transaction> {
    get_transaction(txid, self)
}
/// Confirmation status of a transaction.
pub fn get_transaction_status(&self, txid: TxidPath) -> Result<TxStatus> {
    get_transaction_status(txid, self)
}
/// Raw transaction hex by txid.
pub fn get_transaction_hex(&self, txid: TxidPath) -> Result<String> {
    get_transaction_hex(txid, self)
}
/// Block info by hash: resolve the hash to a height, then delegate.
pub fn get_block(&self, hash: &str) -> Result<BlockInfo> {
    let height = get_height_by_hash(hash, self)?;
    get_block_by_height(height, self)
}
/// Block info by height.
pub fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> {
    get_block_by_height(height, self)
}
/// Best-chain status of a block by hash.
pub fn get_block_status(&self, hash: &str) -> Result<BlockStatus> {
    let height = get_height_by_hash(hash, self)?;
    get_block_status_by_height(height, self)
}
/// Recent blocks, optionally starting from a given height.
pub fn get_blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
    get_blocks(start_height, self)
}
/// All txids of a block by hash.
pub fn get_block_txids(&self, hash: &str) -> Result<Vec<Txid>> {
    let height = get_height_by_hash(hash, self)?;
    get_block_txids(height, self)
}
/// Mempool summary statistics.
pub fn get_mempool_info(&self) -> Result<MempoolInfo> {
    get_mempool_info(self)
}
/// All txids currently in the mempool.
pub fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
    get_mempool_txids(self)
}
pub fn match_metric(&self, metric: &Metric, limit: Limit) -> Vec<&'static str> {
self.vecs().matches(metric, limit)
}

View File

@@ -3,7 +3,7 @@
use std::{
collections::BTreeMap,
fs::{self, File},
io::Read,
io::{Read, Seek, SeekFrom},
mem,
ops::ControlFlow,
path::PathBuf,
@@ -83,6 +83,26 @@ impl ReaderInner {
self.xor_bytes
}
/// Read raw bytes from a blk file at the given position with XOR decoding
///
/// Opens the blk file containing `position`, seeks to its byte offset,
/// reads exactly `size` bytes, then undoes the blk-file XOR obfuscation
/// (keyed by `self.xor_bytes`) before returning the buffer.
pub fn read_raw_bytes(&self, position: BlkPosition, size: usize) -> Result<Vec<u8>> {
    let blk_paths = self.blk_index_to_blk_path();
    let blk_path = blk_paths
        .get(&position.blk_index())
        .ok_or("Blk file not found")?;
    let mut file = File::open(blk_path)?;
    file.seek(SeekFrom::Start(position.offset() as u64))?;
    let mut buffer = vec![0u8; size];
    file.read_exact(&mut buffer)?;
    // The XOR keystream must be advanced to the same offset the bytes
    // were read from, otherwise decoding is misaligned.
    let mut xori = XORIndex::default();
    xori.add_assign(position.offset() as usize);
    xori.bytes(&mut buffer, self.xor_bytes);
    Ok(buffer)
}
///
/// Returns a crossbeam channel receiver that receives `Block` from an **inclusive** range (`start` and `end`)
///

View File

@@ -3,14 +3,13 @@ use std::path::{Path, PathBuf};
use std::thread::sleep;
use std::{mem, sync::Arc, time::Duration};
use bitcoin::block::Header;
use bitcoin::consensus::encode;
use bitcoincore_rpc::json::{
GetBlockHeaderResult, GetBlockResult, GetBlockchainInfoResult, GetTxOutResult,
use bitcoin::{block::Header, consensus::encode};
use bitcoincore_rpc::{
json::{GetBlockHeaderResult, GetBlockResult, GetBlockchainInfoResult, GetTxOutResult},
{Client as CoreClient, Error as RpcError, RpcApi},
};
use bitcoincore_rpc::{Client as CoreClient, Error as RpcError, RpcApi};
use brk_error::Result;
use brk_types::{BlockHash, Height, Sats, Transaction, TxIn, TxOut, TxStatus, Txid, Vout};
use brk_types::{BlockHash, Height, Sats, Transaction, TxIn, TxOut, TxStatus, TxWithHex, Txid, Vout};
pub use bitcoincore_rpc::Auth;
@@ -120,11 +119,13 @@ impl Client {
Ok(tx)
}
pub fn get_mempool_transaction<'a, T>(&self, txid: &'a T) -> Result<Transaction>
pub fn get_mempool_transaction<'a, T>(&self, txid: &'a T) -> Result<TxWithHex>
where
&'a T: Into<&'a bitcoin::Txid>,
{
let mut tx = self.get_raw_transaction(txid, None as Option<&'a BlockHash>)?;
// Get hex first, then deserialize from it
let hex = self.get_raw_transaction_hex(txid, None as Option<&'a BlockHash>)?;
let mut tx = encode::deserialize_hex::<bitcoin::Transaction>(&hex)?;
let input = mem::take(&mut tx.input)
.into_iter()
@@ -170,12 +171,12 @@ impl Client {
fee: Sats::default(),
input,
output: tx.output.into_iter().map(TxOut::from).collect(),
status: TxStatus::UNCOMFIRMED,
status: TxStatus::UNCONFIRMED,
};
tx.compute_fee();
Ok(tx)
Ok(TxWithHex::new(tx, hex))
}
pub fn get_tx_out(

View File

@@ -1,11 +1,11 @@
use aide::axum::{ApiRouter, routing::get_with};
use axum::{
extract::{Path, State},
extract::{Path, Query, State},
http::HeaderMap,
response::{Redirect, Response},
routing::get,
};
use brk_types::{Address, AddressStats};
use brk_types::{Address, AddressStats, AddressTxidsParam, Txid, Utxo};
use crate::{
VERSION,
@@ -46,5 +46,52 @@ impl AddressRoutes for ApiRouter<AppState> {
.server_error()
),
)
.api_route(
"/api/address/{address}/txs",
get_with(async |
headers: HeaderMap,
Path(address): Path<Address>,
Query(params): Query<AddressTxidsParam>,
State(state): State<AppState>
| {
let etag = format!("{VERSION}-{}", state.get_height().await);
if headers.has_etag(&etag) {
return Response::new_not_modified();
}
state.get_address_txids(address, params.after_txid, params.limit).await.to_json_response(&etag)
}, |op| op
.addresses_tag()
.summary("Address transaction IDs")
.description("Get transaction IDs for an address, newest first. Use after_txid for pagination.")
.ok_response::<Vec<Txid>>()
.not_modified()
.bad_request()
.not_found()
.server_error()
),
)
.api_route(
"/api/address/{address}/utxo",
get_with(async |
headers: HeaderMap,
Path(address): Path<Address>,
State(state): State<AppState>
| {
let etag = format!("{VERSION}-{}", state.get_height().await);
if headers.has_etag(&etag) {
return Response::new_not_modified();
}
state.get_address_utxos(address).await.to_json_response(&etag)
}, |op| op
.addresses_tag()
.summary("Address UTXOs")
.description("Get unspent transaction outputs for an address.")
.ok_response::<Vec<Utxo>>()
.not_modified()
.bad_request()
.not_found()
.server_error()
),
)
}
}

View File

@@ -0,0 +1,165 @@
use aide::axum::{ApiRouter, routing::get_with};
use axum::{
extract::{Path, State},
http::HeaderMap,
response::{Redirect, Response},
routing::get,
};
use brk_types::{BlockHashPath, BlockInfo, BlockStatus, Height, HeightPath, StartHeightPath, Txid};
use crate::{
VERSION,
extended::{HeaderMapExtended, ResponseExtended, ResultExtended, TransformResponseExtended},
};
use super::AppState;
/// Registration of block-related REST endpoints on the API router.
pub trait BlockRoutes {
    fn add_block_routes(self) -> Self;
}
impl BlockRoutes for ApiRouter<AppState> {
    fn add_block_routes(self) -> Self {
        // Bare /api/block and /api/blocks only redirect to docs; the data
        // endpoints all take a path parameter. Every handler builds an
        // ETag from VERSION + chain tip so any new block invalidates
        // client caches.
        self.route("/api/block", get(Redirect::temporary("/api/blocks")))
            .route(
                "/api/blocks",
                get(Redirect::temporary("/api#tag/blocks")),
            )
            .api_route(
                "/api/block/{hash}",
                get_with(
                    async |headers: HeaderMap,
                    Path(path): Path<BlockHashPath>,
                    State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state.get_block(path.hash).await.to_json_response(&etag)
                    },
                    |op| {
                        op.blocks_tag()
                            .summary("Block information")
                            .description(
                                "Retrieve block information by block hash. Returns block metadata including height, timestamp, difficulty, size, weight, and transaction count.",
                            )
                            .ok_response::<BlockInfo>()
                            .not_modified()
                            .bad_request()
                            .not_found()
                            .server_error()
                    },
                ),
            )
            .api_route(
                "/api/block/{hash}/status",
                get_with(
                    async |headers: HeaderMap,
                    Path(path): Path<BlockHashPath>,
                    State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state
                            .get_block_status(path.hash)
                            .await
                            .to_json_response(&etag)
                    },
                    |op| {
                        op.blocks_tag()
                            .summary("Block status")
                            .description(
                                "Retrieve the status of a block. Returns whether the block is in the best chain and, if so, its height and the hash of the next block.",
                            )
                            .ok_response::<BlockStatus>()
                            .not_modified()
                            .bad_request()
                            .not_found()
                            .server_error()
                    },
                ),
            )
            .api_route(
                "/api/block-height/{height}",
                get_with(
                    async |headers: HeaderMap,
                    Path(path): Path<HeightPath>,
                    State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state
                            .get_block_by_height(Height::from(path.height))
                            .await
                            .to_json_response(&etag)
                    },
                    |op| {
                        op.blocks_tag()
                            .summary("Block by height")
                            .description(
                                "Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count.",
                            )
                            .ok_response::<BlockInfo>()
                            .not_modified()
                            .bad_request()
                            .not_found()
                            .server_error()
                    },
                ),
            )
            .api_route(
                "/api/blocks/{start_height}",
                get_with(
                    async |headers: HeaderMap,
                    Path(path): Path<StartHeightPath>,
                    State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        // start_height is optional in the path type; None
                        // means "from the current tip".
                        let start_height = path.start_height.map(Height::from);
                        state.get_blocks(start_height).await.to_json_response(&etag)
                    },
                    |op| {
                        op.blocks_tag()
                            .summary("Recent blocks")
                            .description(
                                "Retrieve the last 10 blocks, optionally starting from a specific height. Returns block metadata for each block.",
                            )
                            .ok_response::<Vec<BlockInfo>>()
                            .not_modified()
                            .bad_request()
                            .server_error()
                    },
                ),
            )
            .api_route(
                "/api/block/{hash}/txids",
                get_with(
                    async |headers: HeaderMap,
                    Path(path): Path<BlockHashPath>,
                    State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state.get_block_txids(path.hash).await.to_json_response(&etag)
                    },
                    |op| {
                        op.blocks_tag()
                            .summary("Block transaction IDs")
                            .description(
                                "Retrieve all transaction IDs in a block by block hash.",
                            )
                            .ok_response::<Vec<Txid>>()
                            .not_modified()
                            .bad_request()
                            .not_found()
                            .server_error()
                    },
                ),
            )
    }
}

View File

@@ -0,0 +1,66 @@
use aide::axum::{ApiRouter, routing::get_with};
use axum::{
extract::State,
http::HeaderMap,
response::{Redirect, Response},
routing::get,
};
use brk_types::{MempoolInfo, Txid};
use crate::{
VERSION,
extended::{HeaderMapExtended, ResponseExtended, ResultExtended, TransformResponseExtended},
};
use super::AppState;
/// Extension trait that installs the mempool-related API routes on a router.
pub trait MempoolRoutes {
    /// Registers every `/api/mempool*` endpoint and returns the router.
    fn add_mempool_routes(self) -> Self;
}
impl MempoolRoutes for ApiRouter<AppState> {
    /// Registers mempool endpoints: aggregate stats and the txid list.
    ///
    /// ETags combine the crate `VERSION` with the chain tip height, so client
    /// caches are refreshed at least once per block.
    fn add_mempool_routes(self) -> Self {
        self
            // Bare `/api/mempool` redirects into the generated API docs.
            .route("/api/mempool", get(Redirect::temporary("/api#tag/mempool")))
            .api_route(
                "/api/mempool/info",
                get_with(
                    async |headers: HeaderMap, State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        // Short-circuit with 304 when the client cache is current.
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state.get_mempool_info().await.to_json_response(&etag)
                    },
                    |op| {
                        op.mempool_tag()
                            .summary("Mempool statistics")
                            .description("Get current mempool statistics including transaction count, total vsize, and total fees.")
                            .ok_response::<MempoolInfo>()
                            .not_modified()
                            .server_error()
                    },
                ),
            )
            .api_route(
                "/api/mempool/txids",
                get_with(
                    async |headers: HeaderMap, State(state): State<AppState>| {
                        let etag = format!("{VERSION}-{}", state.get_height().await);
                        if headers.has_etag(&etag) {
                            return Response::new_not_modified();
                        }
                        state.get_mempool_txids().await.to_json_response(&etag)
                    },
                    |op| {
                        op.mempool_tag()
                            .summary("Mempool transaction IDs")
                            .description("Get all transaction IDs currently in the mempool.")
                            .ok_response::<Vec<Txid>>()
                            .not_modified()
                            .server_error()
                    },
                ),
            )
    }
}

View File

@@ -14,13 +14,18 @@ use brk_types::Health;
use crate::{
VERSION,
api::{addresses::AddressRoutes, metrics::ApiMetricsRoutes, transactions::TxRoutes},
api::{
addresses::AddressRoutes, blocks::BlockRoutes, mempool::MempoolRoutes,
metrics::ApiMetricsRoutes, transactions::TxRoutes,
},
extended::{HeaderMapExtended, ResponseExtended, TransformResponseExtended},
};
use super::AppState;
mod addresses;
mod blocks;
mod mempool;
mod metrics;
mod openapi;
mod transactions;
@@ -34,6 +39,8 @@ pub trait ApiRoutes {
impl ApiRoutes for ApiRouter<AppState> {
fn add_api_routes(self) -> Self {
self.add_addresses_routes()
.add_block_routes()
.add_mempool_routes()
.add_tx_routes()
.add_metrics_routes()
.route("/api/server", get(Redirect::temporary("/api#tag/server")))

View File

@@ -5,7 +5,7 @@ use axum::{
response::{Redirect, Response},
routing::get,
};
use brk_types::{Transaction, TxidPath};
use brk_types::{Transaction, TxStatus, TxidPath};
use crate::{
VERSION,
@@ -50,5 +50,59 @@ impl TxRoutes for ApiRouter<AppState> {
.server_error(),
),
)
.api_route(
"/api/tx/{txid}/status",
get_with(
async |
headers: HeaderMap,
Path(txid): Path<TxidPath>,
State(state): State<AppState>
| {
let etag = format!("{VERSION}-{}", state.get_height().await);
if headers.has_etag(&etag) {
return Response::new_not_modified();
}
state.get_transaction_status(txid).await.to_json_response(&etag)
},
|op| op
.transactions_tag()
.summary("Transaction status")
.description(
"Retrieve the confirmation status of a transaction. Returns whether the transaction is confirmed and, if so, the block height, hash, and timestamp.",
)
.ok_response::<TxStatus>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
.api_route(
"/api/tx/{txid}/hex",
get_with(
async |
headers: HeaderMap,
Path(txid): Path<TxidPath>,
State(state): State<AppState>
| {
let etag = format!("{VERSION}-{}", state.get_height().await);
if headers.has_etag(&etag) {
return Response::new_not_modified();
}
state.get_transaction_hex(txid).await.to_text_response(&etag)
},
|op| op
.transactions_tag()
.summary("Transaction hex")
.description(
"Retrieve the raw transaction as a hex-encoded string. Returns the serialized transaction in hexadecimal format.",
)
.ok_response::<String>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
}
}

View File

@@ -18,6 +18,8 @@ where
fn new_json_with<T>(status: StatusCode, value: T, etag: &str) -> Self
where
T: Serialize;
fn new_text(value: &str, etag: &str) -> Self;
fn new_text_with(status: StatusCode, value: &str, etag: &str) -> Self;
}
impl ResponseExtended for Response<Body> {
@@ -49,4 +51,21 @@ impl ResponseExtended for Response<Body> {
headers.insert_etag(etag);
response
}
fn new_text(value: &str, etag: &str) -> Self {
Self::new_text_with(StatusCode::default(), value, etag)
}
fn new_text_with(status: StatusCode, value: &str, etag: &str) -> Self {
let mut response = Response::builder()
.body(value.to_string().into())
.unwrap();
*response.status_mut() = status;
let headers = response.headers_mut();
headers.insert_cors();
headers.insert_content_type_text_plain();
headers.insert_cache_control_must_revalidate();
headers.insert_etag(etag);
response
}
}

View File

@@ -9,6 +9,9 @@ pub trait ResultExtended<T> {
fn to_json_response(self, etag: &str) -> Response
where
T: Serialize;
fn to_text_response(self, etag: &str) -> Response
where
T: AsRef<str>;
}
impl<T> ResultExtended<T> for Result<T> {
@@ -37,4 +40,14 @@ impl<T> ResultExtended<T> for Result<T> {
Err((status, message)) => Response::new_json_with(status, &message, etag),
}
}
fn to_text_response(self, etag: &str) -> Response
where
T: AsRef<str>,
{
match self.with_status() {
Ok(value) => Response::new_text(value.as_ref(), etag),
Err((status, message)) => Response::new_text_with(status, &message, etag),
}
}
}

View File

@@ -5,6 +5,7 @@ use schemars::JsonSchema;
pub trait TransformResponseExtended<'t> {
fn addresses_tag(self) -> Self;
fn blocks_tag(self) -> Self;
fn mempool_tag(self) -> Self;
fn metrics_tag(self) -> Self;
fn mining_tag(self) -> Self;
fn server_tag(self) -> Self;
@@ -38,6 +39,10 @@ impl<'t> TransformResponseExtended<'t> for TransformOperation<'t> {
self.tag("Blocks")
}
fn mempool_tag(self) -> Self {
self.tag("Mempool")
}
fn metrics_tag(self) -> Self {
self.tag("Metrics")
}

View File

@@ -221,6 +221,17 @@ where
.map(|(k, v)| (K::from(ByteView::from(&*k)), V::from(ByteView::from(&*v))))
}
/// Iterates over all `(key, value)` pairs whose raw key bytes start with
/// `prefix`, decoding both sides through `ByteView`.
#[inline]
pub fn prefix<P: AsRef<[u8]>>(
    &self,
    prefix: P,
) -> impl DoubleEndedIterator<Item = (K, V)> + '_ {
    self.keyspace
        .prefix(prefix)
        // NOTE(review): `unwrap` turns a storage read error into a panic —
        // presumably matching the sibling iterators in this impl; confirm.
        .map(|res| res.into_inner().unwrap())
        .map(|(k, v)| (K::from(ByteView::from(&*k)), V::from(ByteView::from(&*v))))
}
#[inline]
fn has(&self, height: Height) -> bool {
self.meta.has(height)

View File

@@ -1,6 +1,6 @@
use std::fmt;
use std::{fmt, str::FromStr};
use bitcoin::{ScriptBuf, opcodes, script::Builder};
use bitcoin::ScriptBuf;
use brk_error::Error;
use derive_deref::Deref;
use schemars::JsonSchema;
@@ -53,37 +53,12 @@ impl TryFrom<(&ScriptBuf, OutputType)> for Address {
impl TryFrom<&AddressBytes> for Address {
type Error = Error;
fn try_from(bytes: &AddressBytes) -> Result<Self, Self::Error> {
// P2PK addresses are represented as raw pubkey hex, not as a script
let address = match bytes {
AddressBytes::P2PK65(_) => Self::from(bytes_to_hex(bytes.as_slice())),
AddressBytes::P2PK33(_) => Self::from(bytes_to_hex(bytes.as_slice())),
AddressBytes::P2PKH(b) => Self::try_from(
&Builder::new()
.push_opcode(opcodes::all::OP_DUP)
.push_opcode(opcodes::all::OP_HASH160)
.push_slice(****b)
.push_opcode(opcodes::all::OP_EQUALVERIFY)
.push_opcode(opcodes::all::OP_CHECKSIG)
.into_script(),
)?,
AddressBytes::P2SH(b) => Self::try_from(
&Builder::new()
.push_opcode(opcodes::all::OP_HASH160)
.push_slice(****b)
.push_opcode(opcodes::all::OP_EQUAL)
.into_script(),
)?,
AddressBytes::P2WPKH(b) => {
Self::try_from(&Builder::new().push_int(0).push_slice(****b).into_script())?
}
AddressBytes::P2WSH(b) => {
Self::try_from(&Builder::new().push_int(0).push_slice(****b).into_script())?
}
AddressBytes::P2TR(b) => {
Self::try_from(&Builder::new().push_int(1).push_slice(****b).into_script())?
}
AddressBytes::P2A(b) => {
Self::try_from(&Builder::new().push_int(1).push_slice(****b).into_script())?
AddressBytes::P2PK65(_) | AddressBytes::P2PK33(_) => {
Self::from(bytes_to_hex(bytes.as_slice()))
}
_ => Self::try_from(&bytes.to_script_pubkey())?,
};
Ok(address)
}
@@ -106,3 +81,21 @@ impl Serialize for Address {
serializer.collect_str(&self.address)
}
}
impl FromStr for Address {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let _ = AddressBytes::address_to_script(s)?;
Ok(Self {
address: s.to_string(),
})
}
}
impl Address {
/// Get the script for this address
pub fn script(&self) -> Result<ScriptBuf, Error> {
AddressBytes::address_to_script(&self.address)
}
}

View File

@@ -1,4 +1,6 @@
use bitcoin::ScriptBuf;
use std::str::FromStr;
use bitcoin::{Network, PublicKey, opcodes, script::Builder, ScriptBuf};
use brk_error::Error;
use super::{
@@ -35,6 +37,44 @@ impl AddressBytes {
pub fn hash(&self) -> u64 {
rapidhash::v3::rapidhash_v3(self.as_slice()).to_le()
}
/// Reconstruct the script_pubkey from the address bytes
pub fn to_script_pubkey(&self) -> ScriptBuf {
match self {
AddressBytes::P2PK65(b) => Builder::new()
.push_slice(****b)
.push_opcode(opcodes::all::OP_CHECKSIG)
.into_script(),
AddressBytes::P2PK33(b) => Builder::new()
.push_slice(****b)
.push_opcode(opcodes::all::OP_CHECKSIG)
.into_script(),
AddressBytes::P2PKH(b) => Builder::new()
.push_opcode(opcodes::all::OP_DUP)
.push_opcode(opcodes::all::OP_HASH160)
.push_slice(****b)
.push_opcode(opcodes::all::OP_EQUALVERIFY)
.push_opcode(opcodes::all::OP_CHECKSIG)
.into_script(),
AddressBytes::P2SH(b) => Builder::new()
.push_opcode(opcodes::all::OP_HASH160)
.push_slice(****b)
.push_opcode(opcodes::all::OP_EQUAL)
.into_script(),
AddressBytes::P2WPKH(b) => {
Builder::new().push_int(0).push_slice(****b).into_script()
}
AddressBytes::P2WSH(b) => {
Builder::new().push_int(0).push_slice(****b).into_script()
}
AddressBytes::P2TR(b) => {
Builder::new().push_int(1).push_slice(****b).into_script()
}
AddressBytes::P2A(b) => {
Builder::new().push_int(1).push_slice(****b).into_script()
}
}
}
}
impl TryFrom<&ScriptBuf> for AddressBytes {
@@ -166,3 +206,30 @@ impl From<P2ABytes> for AddressBytes {
Self::P2A(Box::new(value))
}
}
impl AddressBytes {
    /// Parse an address string to a ScriptBuf.
    ///
    /// Accepts either a standard Bitcoin address or a raw public key string
    /// (the latter yields a P2PK script, since P2PK outputs have no address
    /// encoding). Addresses not valid for mainnet are rejected with
    /// `Error::InvalidNetwork`; anything else unparseable yields
    /// `Error::InvalidAddress`.
    pub fn address_to_script(address: &str) -> Result<ScriptBuf, Error> {
        if let Ok(address) = bitcoin::Address::from_str(address) {
            // Mainnet-only: testnet/regtest/signet addresses are refused.
            if !address.is_valid_for_network(Network::Bitcoin) {
                return Err(Error::InvalidNetwork);
            }
            // Network was just checked above, so the unchecked->checked
            // promotion is sound.
            let address = address.assume_checked();
            Ok(address.script_pubkey())
        } else if let Ok(pubkey) = PublicKey::from_str(address) {
            Ok(ScriptBuf::new_p2pk(&pubkey))
        } else {
            Err(Error::InvalidAddress)
        }
    }
}
impl FromStr for AddressBytes {
    type Err = Error;

    /// Parses an address (or raw pubkey) string into typed address bytes by
    /// first reconstructing its script, then classifying the script's
    /// output type.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let script = Self::address_to_script(s)?;
        let outputtype = OutputType::from(&script);
        Self::try_from((&script, outputtype))
    }
}

View File

@@ -6,7 +6,7 @@ use vecdb::Bytes;
use crate::{AddressIndexTxIndex, Vout};
use super::{OutPoint, TypeIndex};
use super::{OutPoint, TxIndex, TypeIndex};
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize)]
#[repr(C)]
@@ -15,6 +15,18 @@ pub struct AddressIndexOutPoint {
vout: Vout, // u16
}
impl AddressIndexOutPoint {
    /// Transaction index extracted from the packed address/tx index pair.
    #[inline]
    pub fn txindex(&self) -> TxIndex {
        self.addressindextxindex.txindex()
    }

    /// Output index (vout) within that transaction.
    #[inline]
    pub fn vout(&self) -> Vout {
        self.vout
    }
}
impl Hash for AddressIndexOutPoint {
fn hash<H: Hasher>(&self, state: &mut H) {
let mut buf = [0u8; 10];

View File

@@ -14,8 +14,16 @@ impl AddressIndexTxIndex {
(self.0 >> 32) as u32
}
pub fn txindex(&self) -> u32 {
self.0 as u32
pub fn txindex(&self) -> TxIndex {
TxIndex::from(self.0 as u32)
}
pub fn min_for_address(addressindex: TypeIndex) -> Self {
Self(u64::from(addressindex) << 32)
}
pub fn max_for_address(addressindex: TypeIndex) -> Self {
Self((u64::from(addressindex) << 32) | u64::MAX >> 32)
}
}

View File

@@ -0,0 +1,17 @@
use schemars::JsonSchema;
use serde::Deserialize;
use crate::Txid;
/// Query parameters for paginating an address's transaction ids.
#[derive(Debug, Default, Deserialize, JsonSchema)]
pub struct AddressTxidsParam {
    /// Txid to paginate from (return transactions before this one)
    pub after_txid: Option<Txid>,
    /// Maximum number of results to return. Defaults to 25 if not specified.
    #[serde(default = "default_limit")]
    pub limit: usize,
}

// Serde default provider for `AddressTxidsParam::limit`.
fn default_limit() -> usize {
    25
}

View File

@@ -19,6 +19,13 @@ impl TryFrom<&str> for BlockHash {
}
}
impl FromStr for BlockHash {
    type Err = Error;

    /// Parses a hex block-hash string; delegates to the `TryFrom<&str>` impl.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::try_from(s)
    }
}
impl From<bitcoin::BlockHash> for BlockHash {
#[inline]
fn from(value: bitcoin::BlockHash) -> Self {

View File

@@ -0,0 +1,9 @@
use schemars::JsonSchema;
use serde::Deserialize;
/// Path parameter wrapper for routes addressed by block hash.
#[derive(Deserialize, JsonSchema)]
pub struct BlockHashPath {
    /// Bitcoin block hash
    #[schemars(example = &"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")]
    pub hash: String,
}

View File

@@ -0,0 +1,29 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::{BlockHash, Height, Timestamp, Weight};
/// Block information returned by the API
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub struct BlockInfo {
    /// Block hash
    pub id: BlockHash,
    /// Block height
    pub height: Height,
    /// Number of transactions in the block
    pub tx_count: u32,
    /// Block size in bytes
    pub size: u64,
    /// Block weight in weight units
    pub weight: Weight,
    /// Block timestamp (Unix time)
    pub timestamp: Timestamp,
    /// Block difficulty as a floating point number
    pub difficulty: f64,
}

View File

@@ -0,0 +1,37 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::{BlockHash, Height};
/// Block status indicating whether block is in the best chain
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub struct BlockStatus {
    /// Whether this block is in the best chain
    pub in_best_chain: bool,
    /// Block height (only if in best chain)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub height: Option<Height>,
    /// Hash of the next block in the best chain (only if in best chain and not tip)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_best: Option<BlockHash>,
}

impl BlockStatus {
    /// Status for a block on the best chain; `next_best` is `None` when the
    /// block is the current tip.
    pub fn in_best_chain(height: Height, next_best: Option<BlockHash>) -> Self {
        Self {
            in_best_chain: true,
            height: Some(height),
            next_best,
        }
    }

    /// Status for a stale/orphaned block; height and successor are omitted
    /// from the serialized output.
    pub fn not_in_best_chain() -> Self {
        Self {
            in_best_chain: false,
            height: None,
            next_best: None,
        }
    }
}

View File

@@ -3,17 +3,18 @@ use std::{
ops::{Add, AddAssign, Div},
};
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::{Formattable, Pco};
use super::{Sats, StoredU64};
use super::{Sats, VSize};
#[derive(Debug, Clone, Copy, Serialize, Pco)]
#[derive(Debug, Default, Clone, Copy, Serialize, Pco, JsonSchema)]
pub struct FeeRate(f64);
impl From<(Sats, StoredU64)> for FeeRate {
impl From<(Sats, VSize)> for FeeRate {
#[inline]
fn from((sats, vsize): (Sats, StoredU64)) -> Self {
fn from((sats, vsize): (Sats, VSize)) -> Self {
if sats.is_zero() {
return Self(0.0);
}

View File

@@ -0,0 +1,9 @@
use schemars::JsonSchema;
use serde::Deserialize;
/// Path parameter wrapper for routes addressed by block height.
#[derive(Deserialize, JsonSchema)]
pub struct HeightPath {
    /// Bitcoin block height
    #[schemars(example = 0)]
    pub height: u32,
}

View File

@@ -9,6 +9,7 @@ mod addresshash;
mod addressindexoutpoint;
mod addressindextxindex;
mod addressmempoolstats;
mod addresstxidsparam;
mod addressstats;
mod anyaddressindex;
mod bitcoin;
@@ -16,7 +17,10 @@ mod blkmetadata;
mod blkposition;
mod block;
mod blockhash;
mod blockhashpath;
mod blockhashprefix;
mod blockinfo;
mod blockstatus;
mod bytes;
mod cents;
mod date;
@@ -32,12 +36,14 @@ mod format;
mod halvingepoch;
mod health;
mod height;
mod heightpath;
mod index;
mod indexinfo;
mod limit;
mod loadedaddressdata;
mod loadedaddressindex;
mod metric;
mod mempoolinfo;
mod metriccount;
mod metrics;
mod monthindex;
@@ -67,12 +73,14 @@ mod poolid;
mod pools;
mod quarterindex;
mod rawlocktime;
mod recommendedfees;
mod sats;
mod semesterindex;
mod stored_bool;
mod stored_f32;
mod stored_f64;
mod stored_i16;
mod startheightpath;
mod stored_string;
mod stored_u16;
mod stored_u32;
@@ -91,11 +99,14 @@ mod txout;
mod txoutindex;
mod txstatus;
mod txversion;
mod txwithhex;
mod typeindex;
mod unit;
mod unknownoutputindex;
mod utxo;
mod vin;
mod vout;
mod vsize;
mod weekindex;
mod weight;
mod yearindex;
@@ -108,13 +119,17 @@ pub use addressindexoutpoint::*;
pub use addressindextxindex::*;
pub use addressmempoolstats::*;
pub use addressstats::*;
pub use addresstxidsparam::*;
pub use anyaddressindex::*;
pub use bitcoin::*;
pub use blkmetadata::*;
pub use blkposition::*;
pub use block::*;
pub use blockhash::*;
pub use blockhashpath::*;
pub use blockhashprefix::*;
pub use blockinfo::*;
pub use blockstatus::*;
pub use bytes::*;
pub use cents::*;
pub use date::*;
@@ -130,11 +145,13 @@ pub use format::*;
pub use halvingepoch::*;
pub use health::*;
pub use height::*;
pub use heightpath::*;
pub use index::*;
pub use indexinfo::*;
pub use limit::*;
pub use loadedaddressdata::*;
pub use loadedaddressindex::*;
pub use mempoolinfo::*;
pub use metric::*;
pub use metriccount::*;
pub use metrics::*;
@@ -165,12 +182,14 @@ pub use poolid::*;
pub use pools::*;
pub use quarterindex::*;
pub use rawlocktime::*;
pub use recommendedfees::*;
pub use sats::*;
pub use semesterindex::*;
pub use stored_bool::*;
pub use stored_f32::*;
pub use stored_f64::*;
pub use stored_i16::*;
pub use startheightpath::*;
pub use stored_string::*;
pub use stored_u8::*;
pub use stored_u16::*;
@@ -189,11 +208,14 @@ pub use txout::*;
pub use txoutindex::*;
pub use txstatus::*;
pub use txversion::*;
pub use txwithhex::*;
pub use typeindex::*;
pub use unit::*;
pub use unknownoutputindex::*;
pub use utxo::*;
pub use vin::*;
pub use vout::*;
pub use vsize::*;
pub use weekindex::*;
pub use weight::*;
pub use yearindex::*;

View File

@@ -0,0 +1,33 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::{Sats, Transaction, VSize};
/// Mempool statistics
#[derive(Debug, Default, Clone, Serialize, JsonSchema)]
pub struct MempoolInfo {
    /// Number of transactions in the mempool
    pub count: usize,
    /// Total virtual size of all transactions in the mempool (vbytes)
    pub vsize: VSize,
    /// Total fees of all transactions in the mempool (satoshis)
    pub total_fee: Sats,
}

impl MempoolInfo {
    /// Increment stats for a newly added transaction
    #[inline]
    pub fn add(&mut self, tx: &Transaction) {
        self.count += 1;
        self.vsize += tx.vsize();
        self.total_fee += tx.fee;
    }

    /// Decrement stats for a removed transaction
    ///
    /// NOTE(review): assumes `tx` was previously counted via `add`; removing a
    /// transaction that was never added underflows the unsigned counters
    /// (panic in debug, wrap in release) — confirm callers uphold this.
    #[inline]
    pub fn remove(&mut self, tx: &Transaction) {
        self.count -= 1;
        self.vsize -= tx.vsize();
        self.total_fee -= tx.fee;
    }
}

View File

@@ -5,6 +5,8 @@ use serde::Serialize;
use strum::Display;
use vecdb::{Bytes, Formattable};
use crate::AddressBytes;
#[derive(
Debug, Clone, Copy, Display, PartialEq, Eq, PartialOrd, Ord, Serialize, JsonSchema, Hash,
)]
@@ -879,6 +881,22 @@ impl From<AddressType> for OutputType {
}
}
impl From<&AddressBytes> for OutputType {
    /// Maps each address-bytes variant to its corresponding output type.
    /// Exhaustive on purpose: adding an `AddressBytes` variant forces an
    /// update here.
    #[inline]
    fn from(bytes: &AddressBytes) -> Self {
        match bytes {
            AddressBytes::P2PK65(_) => Self::P2PK65,
            AddressBytes::P2PK33(_) => Self::P2PK33,
            AddressBytes::P2PKH(_) => Self::P2PKH,
            AddressBytes::P2SH(_) => Self::P2SH,
            AddressBytes::P2WPKH(_) => Self::P2WPKH,
            AddressBytes::P2WSH(_) => Self::P2WSH,
            AddressBytes::P2TR(_) => Self::P2TR,
            AddressBytes::P2A(_) => Self::P2A,
        }
    }
}
impl TryFrom<OutputType> for AddressType {
type Error = Error;
fn try_from(value: OutputType) -> Result<Self, Self::Error> {

View File

@@ -0,0 +1,20 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::FeeRate;
/// Recommended fee rates in sat/vB
///
/// Serialized in camelCase to match the mempool.space API field names.
#[derive(Debug, Default, Clone, Serialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct RecommendedFees {
    /// Fee rate for fastest confirmation (next block)
    pub fastest_fee: FeeRate,
    /// Fee rate for confirmation within ~30 minutes (3 blocks)
    pub half_hour_fee: FeeRate,
    /// Fee rate for confirmation within ~1 hour (6 blocks)
    pub hour_fee: FeeRate,
    /// Fee rate for economical confirmation
    pub economy_fee: FeeRate,
    /// Minimum relay fee rate
    pub minimum_fee: FeeRate,
}

View File

@@ -0,0 +1,9 @@
use schemars::JsonSchema;
use serde::Deserialize;
/// Path parameter wrapper for routes paginated by a starting block height.
#[derive(Deserialize, JsonSchema)]
pub struct StartHeightPath {
    /// Starting block height (optional, defaults to latest)
    #[schemars(example = 800000)]
    pub start_height: Option<u32>,
}

View File

@@ -1,9 +1,9 @@
use crate::{RawLockTime, Sats, TxIn, TxIndex, TxOut, TxStatus, TxVersion, Txid, Weight};
use crate::{FeeRate, RawLockTime, Sats, TxIn, TxIndex, TxOut, TxStatus, TxVersion, Txid, VSize, Weight};
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::CheckedSub;
#[derive(Debug, Serialize, JsonSchema)]
#[derive(Debug, Clone, Serialize, JsonSchema)]
/// Transaction information compatible with mempool.space API format
pub struct Transaction {
#[schemars(example = TxIndex::new(0))]
@@ -62,4 +62,16 @@ impl Transaction {
pub fn compute_fee(&mut self) {
self.fee = Self::fee(self).unwrap_or_default();
}
/// Virtual size in vbytes (weight / 4, rounded up)
#[inline]
pub fn vsize(&self) -> VSize {
    VSize::from(self.weight)
}

/// Fee rate in sat/vB, computed from `self.fee` and the virtual size.
#[inline]
pub fn fee_rate(&self) -> FeeRate {
    FeeRate::from((self.fee, self.vsize()))
}
}

View File

@@ -1,9 +1,9 @@
use std::{fmt, mem};
use std::{fmt, mem, str::FromStr};
use bitcoin::hashes::Hash;
use derive_deref::Deref;
use schemars::JsonSchema;
use serde::{Serialize, Serializer};
use serde::{Deserialize, Deserializer, Serialize, Serializer, de};
use vecdb::{Bytes, Formattable};
/// Transaction ID (hash)
@@ -71,6 +71,17 @@ impl Serialize for Txid {
}
}
impl<'de> Deserialize<'de> for Txid {
    /// Deserializes a txid from its hex string form, validating it through
    /// `bitcoin::Txid` parsing; invalid hex surfaces as a custom serde error.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        let bitcoin_txid = bitcoin::Txid::from_str(&s).map_err(de::Error::custom)?;
        Ok(Self::from(bitcoin_txid))
    }
}
impl Formattable for Txid {
#[inline(always)]
fn may_need_escaping() -> bool {

View File

@@ -3,7 +3,7 @@ use bitcoin::{Script, ScriptBuf};
use schemars::JsonSchema;
use serde::{Serialize, Serializer, ser::SerializeStruct};
#[derive(Debug, JsonSchema)]
#[derive(Debug, Clone, JsonSchema)]
/// Transaction input
pub struct TxIn {
/// Transaction ID of the output being spent

View File

@@ -12,6 +12,10 @@ use super::Vin;
pub struct TxInIndex(u64);
impl TxInIndex {
/// Sentinel value indicating an unspent output.
/// Used in `txoutindex_to_txinindex` mapping.
pub const UNSPENT: Self = Self(u64::MAX);
pub fn new(index: u64) -> Self {
Self(index)
}
@@ -19,6 +23,10 @@ impl TxInIndex {
pub fn incremented(self) -> Self {
Self(*self + 1)
}
pub fn is_unspent(self) -> bool {
self == Self::UNSPENT
}
}
impl Add<TxInIndex> for TxInIndex {

View File

@@ -3,7 +3,7 @@ use bitcoin::ScriptBuf;
use schemars::JsonSchema;
use serde::{Serialize, Serializer, ser::SerializeStruct};
#[derive(Debug, JsonSchema)]
#[derive(Debug, Clone, JsonSchema)]
/// Transaction output
pub struct TxOut {
/// Script pubkey (locking script)

View File

@@ -3,7 +3,7 @@ use serde::Serialize;
use crate::{BlockHash, Height, Timestamp};
#[derive(Debug, Serialize, JsonSchema)]
#[derive(Debug, Clone, Serialize, JsonSchema)]
/// Transaction confirmation status
pub struct TxStatus {
/// Whether the transaction is confirmed
@@ -24,7 +24,7 @@ pub struct TxStatus {
}
impl TxStatus {
pub const UNCOMFIRMED: Self = Self {
pub const UNCONFIRMED: Self = Self {
confirmed: false,
block_hash: None,
block_height: None,

View File

@@ -0,0 +1,26 @@
use crate::Transaction;
/// A transaction with its raw hex representation
#[derive(Debug, Clone)]
pub struct TxWithHex {
    // Decoded transaction.
    tx: Transaction,
    // Raw serialized transaction, hex-encoded.
    hex: String,
}
impl TxWithHex {
    /// Bundles a decoded transaction together with its raw hex encoding.
    pub fn new(tx: Transaction, hex: String) -> Self {
        Self { tx, hex }
    }

    /// Borrows the decoded transaction.
    pub fn tx(&self) -> &Transaction {
        &self.tx
    }

    /// Borrows the raw hex string.
    pub fn hex(&self) -> &str {
        self.hex.as_str()
    }

    /// Consumes the wrapper, yielding the transaction and its hex.
    pub fn into_parts(self) -> (Transaction, String) {
        let Self { tx, hex } = self;
        (tx, hex)
    }
}

View File

@@ -0,0 +1,13 @@
use schemars::JsonSchema;
use serde::Serialize;
use crate::{Sats, TxStatus, Txid, Vout};
/// Unspent transaction output
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub struct Utxo {
    /// Transaction that created this output
    pub txid: Txid,
    /// Output index within that transaction
    pub vout: Vout,
    /// Confirmation status of the creating transaction
    pub status: TxStatus,
    /// Output value in satoshis
    pub value: Sats,
}

View File

@@ -0,0 +1,110 @@
use std::ops::{Add, AddAssign, Div, Sub, SubAssign};
use derive_deref::Deref;
use schemars::JsonSchema;
use serde::Serialize;
use vecdb::{Formattable, Pco};
use crate::Weight;
/// Virtual size in vbytes (weight / 4, rounded up)
#[derive(Debug, Default, Deref, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Pco, JsonSchema)]
pub struct VSize(u64);

impl VSize {
    /// Wraps a raw vbyte count.
    #[inline]
    pub const fn new(value: u64) -> Self {
        Self(value)
    }
}

impl From<u64> for VSize {
    #[inline]
    fn from(value: u64) -> Self {
        Self(value)
    }
}

impl From<VSize> for u64 {
    #[inline]
    fn from(value: VSize) -> Self {
        value.0
    }
}

impl From<Weight> for VSize {
    /// Weight-unit to vbyte conversion, rounding up (consensus convention).
    #[inline]
    fn from(weight: Weight) -> Self {
        Self(weight.to_vbytes_ceil())
    }
}

impl From<usize> for VSize {
    #[inline]
    fn from(value: usize) -> Self {
        Self(value as u64)
    }
}

impl From<f64> for VSize {
    /// Converts a float vbyte count; the value must be a non-negative whole
    /// number. In release builds the debug_assert is skipped and `as u64`
    /// truncates (NaN/negative saturate to 0).
    #[inline]
    fn from(value: f64) -> Self {
        debug_assert!(value >= 0.0 && value.fract() == 0.0, "VSize must be a non-negative integer");
        Self(value as u64)
    }
}

impl From<VSize> for f64 {
    #[inline]
    fn from(value: VSize) -> Self {
        value.0 as f64
    }
}
// Arithmetic uses plain u64 ops: overflow/underflow panics in debug builds
// and wraps in release builds, like any other unchecked integer math.
impl Add for VSize {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        Self(self.0 + rhs.0)
    }
}

impl AddAssign for VSize {
    fn add_assign(&mut self, rhs: Self) {
        *self = *self + rhs
    }
}

impl Sub for VSize {
    type Output = Self;

    fn sub(self, rhs: Self) -> Self::Output {
        Self(self.0 - rhs.0)
    }
}

impl SubAssign for VSize {
    fn sub_assign(&mut self, rhs: Self) {
        *self = *self - rhs
    }
}

// Integer division by a count (e.g. averaging); panics on division by zero.
impl Div<usize> for VSize {
    type Output = Self;

    fn div(self, rhs: usize) -> Self::Output {
        Self(self.0 / rhs as u64)
    }
}
impl std::fmt::Display for VSize {
    /// Writes the vbyte count as a plain decimal integer.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // itoa sidesteps the generic integer formatting machinery.
        let mut digits = itoa::Buffer::new();
        f.write_str(digits.format(self.0))
    }
}
impl Formattable for VSize {
    /// Plain decimal output never contains characters that need escaping.
    #[inline(always)]
    fn may_need_escaping() -> bool {
        false
    }
}

View File

@@ -8,6 +8,12 @@ use vecdb::{Formattable, Pco};
#[derive(Debug, Deref, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Pco, JsonSchema)]
pub struct Weight(u64);
impl Weight {
    /// Converts weight units to virtual bytes, rounding up; delegates to the
    /// `bitcoin` crate's implementation (vbytes = ceil(weight / 4)).
    pub fn to_vbytes_ceil(&self) -> u64 {
        bitcoin::Weight::from(*self).to_vbytes_ceil()
    }
}
impl From<bitcoin::Weight> for Weight {
#[inline]
fn from(value: bitcoin::Weight) -> Self {