server: snapshot

This commit is contained in:
nym21
2025-12-15 16:32:45 +01:00
parent 882a3525af
commit 825a4a77c0
100 changed files with 2677 additions and 3438 deletions

View File

@@ -1,22 +1,11 @@
use std::collections::BTreeMap;
use brk_computer::Computer;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_types::{
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, DifficultyAdjustment,
HashrateSummary, Height, Index, IndexInfo, Limit, MempoolBlock, MempoolInfo, Metric,
MetricCount, PoolDetail, PoolInfo, PoolSlug, PoolsSummary, RecommendedFees, TimePeriod,
Timestamp, Transaction, TreeNode, TxOutspend, TxStatus, Txid, TxidPath, Utxo, Vout,
};
use tokio::task::spawn_blocking;
use crate::{
Output, PaginatedIndexParam, PaginatedMetrics, PaginationParam, Params, Query,
vecs::{IndexToVec, MetricToVec, Vecs},
};
use crate::Query;
#[derive(Clone)]
pub struct AsyncQuery(Query);
@@ -31,282 +20,37 @@ impl AsyncQuery {
Self(Query::build(reader, indexer, computer, mempool))
}
/// Run a blocking query operation on a spawn_blocking thread.
/// Use this for I/O-heavy or CPU-intensive operations.
///
/// # Example
/// ```ignore
/// let address_stats = query.run(move |q| q.address(address)).await?;
/// ```
pub async fn run<F, T>(&self, f: F) -> Result<T>
where
F: FnOnce(&Query) -> Result<T> + Send + 'static,
T: Send + 'static,
{
let query = self.0.clone();
spawn_blocking(move || f(&query)).await?
}
/// Run a cheap sync operation directly without spawn_blocking.
/// Use this for simple accessors that don't do I/O.
///
/// # Example
/// ```ignore
/// let height = query.sync(|q| q.height());
/// ```
pub fn sync<F, T>(&self, f: F) -> T
where
F: FnOnce(&Query) -> T,
{
f(&self.0)
}
pub fn inner(&self) -> &Query {
&self.0
}
/// Current indexed tip height — a cheap accessor, so it goes through
/// `sync` rather than a `spawn_blocking` round-trip.
pub async fn get_height(&self) -> Height {
    self.sync(|q| q.get_height())
}
pub async fn get_address(&self, address: Address) -> Result<AddressStats> {
let query = self.0.clone();
spawn_blocking(move || query.get_address(address)).await?
}
pub async fn get_address_txids(
&self,
address: Address,
after_txid: Option<Txid>,
limit: usize,
) -> Result<Vec<Txid>> {
let query = self.0.clone();
spawn_blocking(move || query.get_address_txids(address, after_txid, limit)).await?
}
pub async fn get_address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
let query = self.0.clone();
spawn_blocking(move || query.get_address_utxos(address)).await?
}
pub async fn get_address_mempool_txids(&self, address: Address) -> Result<Vec<Txid>> {
let query = self.0.clone();
spawn_blocking(move || query.get_address_mempool_txids(address)).await?
}
pub async fn get_transaction(&self, txid: TxidPath) -> Result<Transaction> {
let query = self.0.clone();
spawn_blocking(move || query.get_transaction(txid)).await?
}
pub async fn get_transaction_status(&self, txid: TxidPath) -> Result<TxStatus> {
let query = self.0.clone();
spawn_blocking(move || query.get_transaction_status(txid)).await?
}
pub async fn get_transaction_hex(&self, txid: TxidPath) -> Result<String> {
let query = self.0.clone();
spawn_blocking(move || query.get_transaction_hex(txid)).await?
}
pub async fn get_tx_outspend(&self, txid: TxidPath, vout: Vout) -> Result<TxOutspend> {
let query = self.0.clone();
spawn_blocking(move || query.get_tx_outspend(txid, vout)).await?
}
pub async fn get_tx_outspends(&self, txid: TxidPath) -> Result<Vec<TxOutspend>> {
let query = self.0.clone();
spawn_blocking(move || query.get_tx_outspends(txid)).await?
}
pub async fn get_block(&self, hash: String) -> Result<BlockInfo> {
let query = self.0.clone();
spawn_blocking(move || query.get_block(&hash)).await?
}
pub async fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_by_height(height)).await?
}
pub async fn get_block_by_timestamp(&self, timestamp: Timestamp) -> Result<BlockTimestamp> {
self.0.get_block_by_timestamp(timestamp)
}
pub async fn get_block_status(&self, hash: String) -> Result<BlockStatus> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_status(&hash)).await?
}
pub async fn get_blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
let query = self.0.clone();
spawn_blocking(move || query.get_blocks(start_height)).await?
}
pub async fn get_block_txids(&self, hash: String) -> Result<Vec<Txid>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_txids(&hash)).await?
}
pub async fn get_block_txs(
&self,
hash: String,
start_index: usize,
) -> Result<Vec<Transaction>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_txs(&hash, start_index)).await?
}
pub async fn get_block_txid_at_index(&self, hash: String, index: usize) -> Result<Txid> {
self.0.get_block_txid_at_index(&hash, index)
}
pub async fn get_block_raw(&self, hash: String) -> Result<Vec<u8>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_raw(&hash)).await?
}
pub async fn get_mempool_info(&self) -> Result<MempoolInfo> {
self.0.get_mempool_info()
}
pub async fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
self.0.get_mempool_txids()
}
pub async fn get_recommended_fees(&self) -> Result<RecommendedFees> {
self.0.get_recommended_fees()
}
pub async fn get_mempool_blocks(&self) -> Result<Vec<MempoolBlock>> {
self.0.get_mempool_blocks()
}
pub async fn get_difficulty_adjustment(&self) -> Result<DifficultyAdjustment> {
let query = self.0.clone();
spawn_blocking(move || query.get_difficulty_adjustment()).await?
}
pub async fn get_mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
let query = self.0.clone();
spawn_blocking(move || query.get_mining_pools(time_period)).await?
}
pub async fn get_all_pools(&self) -> Result<Vec<PoolInfo>> {
Ok(self.0.get_all_pools())
}
pub async fn get_pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
let query = self.0.clone();
spawn_blocking(move || query.get_pool_detail(slug)).await?
}
pub async fn get_hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
let query = self.0.clone();
spawn_blocking(move || query.get_hashrate(time_period)).await?
}
pub async fn get_difficulty_adjustments(
&self,
time_period: Option<TimePeriod>,
) -> Result<Vec<brk_types::DifficultyAdjustmentEntry>> {
let query = self.0.clone();
spawn_blocking(move || query.get_difficulty_adjustments(time_period)).await?
}
pub async fn get_block_fees(
&self,
time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeesEntry>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_fees(time_period)).await?
}
pub async fn get_block_rewards(
&self,
time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockRewardsEntry>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_rewards(time_period)).await?
}
pub async fn get_block_fee_rates(
&self,
time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeeRatesEntry>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_fee_rates(time_period)).await?
}
pub async fn get_block_sizes_weights(
&self,
time_period: TimePeriod,
) -> Result<brk_types::BlockSizesWeights> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_sizes_weights(time_period)).await?
}
pub async fn get_reward_stats(&self, block_count: usize) -> Result<brk_types::RewardStats> {
let query = self.0.clone();
spawn_blocking(move || query.get_reward_stats(block_count)).await?
}
pub async fn match_metric(&self, metric: Metric, limit: Limit) -> Result<Vec<&'static str>> {
let query = self.0.clone();
spawn_blocking(move || Ok(query.match_metric(&metric, limit))).await?
}
// pub async fn search_metric_with_index(
// &self,
// metric: &str,
// index: Index,
// // params: &Params,
// ) -> Result<Vec<(String, &&dyn AnyExportableVec)>> {
// let query = self.0.clone();
// spawn_blocking(move || query.search_metric_with_index(metric, index)).await?
// }
// pub async fn format(
// &self,
// metrics: Vec<(String, &&dyn AnyExportableVec)>,
// params: &ParamsOpt,
// ) -> Result<Output> {
// let query = self.0.clone();
// spawn_blocking(move || query.format(metrics, params)).await?
// }
pub async fn search_and_format(&self, params: Params) -> Result<Output> {
let query = self.0.clone();
spawn_blocking(move || query.search_and_format(params)).await?
}
pub async fn metric_to_index_to_vec(&self) -> &BTreeMap<&str, IndexToVec<'_>> {
self.0.metric_to_index_to_vec()
}
pub async fn index_to_metric_to_vec(&self) -> &BTreeMap<Index, MetricToVec<'_>> {
self.0.index_to_metric_to_vec()
}
pub async fn metric_count(&self) -> MetricCount {
self.0.metric_count()
}
pub async fn distinct_metric_count(&self) -> usize {
self.0.distinct_metric_count()
}
pub async fn total_metric_count(&self) -> usize {
self.0.total_metric_count()
}
pub async fn get_indexes(&self) -> &[IndexInfo] {
self.0.get_indexes()
}
pub async fn get_metrics(&self, pagination: PaginationParam) -> PaginatedMetrics {
self.0.get_metrics(pagination)
}
pub async fn get_metrics_catalog(&self) -> &TreeNode {
self.0.get_metrics_catalog()
}
pub async fn get_index_to_vecids(&self, paginated_index: PaginatedIndexParam) -> Vec<&str> {
self.0.get_index_to_vecids(paginated_index)
}
pub async fn metric_to_indexes(&self, metric: Metric) -> Option<&Vec<Index>> {
self.0.metric_to_indexes(metric)
}
#[inline]
pub async fn reader(&self) -> &Reader {
self.0.reader()
}
#[inline]
pub async fn indexer(&self) -> &Indexer {
self.0.indexer()
}
#[inline]
pub async fn computer(&self) -> &Computer {
self.0.computer()
}
#[inline]
pub async fn vecs(&self) -> &'static Vecs<'static> {
self.0.vecs()
}
}

View File

@@ -1,87 +0,0 @@
use std::str::FromStr;
use bitcoin::{Network, PublicKey, ScriptBuf};
use brk_error::{Error, Result};
use brk_types::{
Address, AddressBytes, AddressChainStats, AddressHash, AddressStats, AnyAddressDataIndexEnum,
OutputType,
};
use vecdb::TypedVecIterator;
use crate::Query;
/// Look up on-chain and mempool statistics for a single address.
///
/// Accepts either an address string or a raw public key (treated as
/// P2PK). Only mainnet addresses are accepted.
///
/// # Errors
/// - `InvalidNetwork` if the address parses but is not valid for mainnet.
/// - `InvalidAddress` if the string is neither an address nor a public key.
/// - `UnknownAddress` if the script has never been seen by the indexer.
pub fn get_address(Address { address }: Address, query: &Query) -> Result<AddressStats> {
    let indexer = query.indexer();
    let computer = query.computer();
    let stores = &indexer.stores;
    // Turn the input string into a script_pubkey we can hash and look up.
    let script = if let Ok(address) = bitcoin::Address::from_str(&address) {
        if !address.is_valid_for_network(Network::Bitcoin) {
            return Err(Error::InvalidNetwork);
        }
        let address = address.assume_checked();
        address.script_pubkey()
    } else if let Ok(pubkey) = PublicKey::from_str(&address) {
        // Bare public key: treat as pay-to-pubkey.
        ScriptBuf::new_p2pk(&pubkey)
    } else {
        return Err(Error::InvalidAddress);
    };
    let outputtype = OutputType::from(&script);
    let Ok(bytes) = AddressBytes::try_from((&script, outputtype)) else {
        return Err(Error::Str("Failed to convert the address to bytes"));
    };
    let addresstype = outputtype;
    let hash = AddressHash::from(&bytes);
    // NOTE(review): a store read error is folded into UnknownAddress by the
    // `let Ok(Some(..))` pattern, and the inner `.unwrap()` assumes every
    // address type has a store — confirm both are intended.
    let Ok(Some(type_index)) = stores
        .addresstype_to_addresshash_to_addressindex
        .get(addresstype)
        .unwrap()
        .get(&hash)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownAddress);
    };
    let any_address_index = computer
        .stateful
        .any_address_indexes
        .get_anyaddressindex_once(outputtype, type_index)?;
    // Address data is split across two backing vectors ("loaded" vs
    // "empty"); presumably empty = no current balance — TODO confirm.
    let address_data = match any_address_index.to_enum() {
        AnyAddressDataIndexEnum::Loaded(index) => computer
            .stateful
            .addresses_data
            .loaded
            .iter()?
            .get_unwrap(index),
        AnyAddressDataIndexEnum::Empty(index) => computer
            .stateful
            .addresses_data
            .empty
            .iter()?
            .get_unwrap(index)
            .into(),
    };
    Ok(AddressStats {
        address: address.into(),
        chain_stats: AddressChainStats {
            type_index,
            funded_txo_count: address_data.funded_txo_count,
            funded_txo_sum: address_data.received,
            spent_txo_count: address_data.spent_txo_count,
            spent_txo_sum: address_data.sent,
            tx_count: address_data.tx_count,
        },
        // Mempool stats are only present when a mempool is attached.
        mempool_stats: query.mempool().map(|mempool| {
            mempool
                .get_addresses()
                .get(&bytes)
                .map(|(stats, _)| stats)
                .cloned()
                .unwrap_or_default()
        }),
    })
}

View File

@@ -1,24 +0,0 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{Address, AddressBytes, Txid};
use crate::Query;
/// Maximum number of mempool txids to return
const MAX_MEMPOOL_TXIDS: usize = 50;
/// Get mempool transaction IDs for an address
pub fn get_address_mempool_txids(address: Address, query: &Query) -> Result<Vec<Txid>> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
let bytes = AddressBytes::from_str(&address.address)?;
let addresses = mempool.get_addresses();
let txids: Vec<Txid> = addresses
.get(&bytes)
.map(|(_, txids)| txids.iter().take(MAX_MEMPOOL_TXIDS).cloned().collect())
.unwrap_or_default();
Ok(txids)
}

View File

@@ -1,12 +0,0 @@
mod addr;
mod mempool_txids;
mod resolve;
mod txids;
mod utxos;
mod validate;
pub use addr::*;
pub use mempool_txids::*;
pub use txids::*;
pub use utxos::*;
pub use validate::*;

View File

@@ -1,27 +0,0 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{Address, AddressBytes, AddressHash, OutputType, TypeIndex};
use crate::Query;
/// Resolve an address string to its output type and type_index
///
/// Decodes the address into its canonical byte form, derives the output
/// type and hash, then looks the hash up in the indexer's per-type store.
///
/// # Errors
/// Propagates decode failures from `AddressBytes::from_str`; returns
/// `UnknownAddress` when the hash is absent from the store. NOTE(review):
/// a store read error is also folded into `UnknownAddress` by the
/// `let Ok(Some(..))` pattern — confirm that is intended.
pub fn resolve_address(address: &Address, query: &Query) -> Result<(OutputType, TypeIndex)> {
    let stores = &query.indexer().stores;
    let bytes = AddressBytes::from_str(&address.address)?;
    let outputtype = OutputType::from(&bytes);
    let hash = AddressHash::from(&bytes);
    // `.unwrap()` assumes a store exists for every output type — TODO confirm.
    let Ok(Some(type_index)) = stores
        .addresstype_to_addresshash_to_addressindex
        .get(outputtype)
        .unwrap()
        .get(&hash)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownAddress);
    };
    Ok((outputtype, type_index))
}

View File

@@ -1,60 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{Address, AddressIndexTxIndex, TxIndex, Txid, Unit};
use vecdb::TypedVecIterator;
use super::resolve::resolve_address;
use crate::Query;
/// Get transaction IDs for an address, newest first
///
/// Pagination: when `after_txid` is given it is resolved to its tx index
/// and only strictly older transactions (lower txindex) are returned.
///
/// # Errors
/// Propagates address resolution failures; fails when `after_txid`
/// cannot be looked up or is unknown.
pub fn get_address_txids(
    address: Address,
    after_txid: Option<Txid>,
    limit: usize,
    query: &Query,
) -> Result<Vec<Txid>> {
    let indexer = query.indexer();
    let stores = &indexer.stores;
    let (outputtype, type_index) = resolve_address(&address, query)?;
    // Per-output-type store keyed by (addressindex, txindex) pairs.
    // `.unwrap()` assumes a store exists for every output type — TODO confirm.
    let store = stores
        .addresstype_to_addressindex_and_txindex
        .get(outputtype)
        .unwrap();
    // The address's type_index in big-endian bytes forms the key prefix.
    let prefix = u32::from(type_index).to_be_bytes();
    let after_txindex = if let Some(after_txid) = after_txid {
        let txindex = stores
            .txidprefix_to_txindex
            .get(&after_txid.into())
            .map_err(|_| Error::Str("Failed to look up after_txid"))?
            .ok_or(Error::Str("after_txid not found"))?
            .into_owned();
        Some(txindex)
    } else {
        None
    };
    // Reverse scan => newest first; the filter keeps only entries older
    // than the pagination cursor (when one was supplied).
    let txindices: Vec<TxIndex> = store
        .prefix(prefix)
        .rev()
        .filter(|(key, _): &(AddressIndexTxIndex, Unit)| {
            if let Some(after) = after_txindex {
                key.txindex() < after
            } else {
                true
            }
        })
        .take(limit)
        .map(|(key, _)| key.txindex())
        .collect();
    // Resolve the collected indices to txids with one reusable iterator.
    let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
    let txids: Vec<Txid> = txindices
        .into_iter()
        .map(|txindex| txindex_to_txid_iter.get_unwrap(txindex))
        .collect();
    Ok(txids)
}

View File

@@ -1,65 +0,0 @@
use brk_error::Result;
use brk_types::{
Address, AddressIndexOutPoint, Sats, TxIndex, TxStatus, Txid, Unit, Utxo, Vout,
};
use vecdb::TypedVecIterator;
use super::resolve::resolve_address;
use crate::Query;
/// Get UTXOs for an address
///
/// Scans the per-type unspent-outpoint store for this address, then
/// resolves each (txindex, vout) outpoint to txid / value / block info
/// via the indexed vectors. All returned entries are confirmed.
pub fn get_address_utxos(address: Address, query: &Query) -> Result<Vec<Utxo>> {
    let indexer = query.indexer();
    let stores = &indexer.stores;
    let vecs = &indexer.vecs;
    let (outputtype, type_index) = resolve_address(&address, query)?;
    // `.unwrap()` assumes a store exists for every output type — TODO confirm.
    let store = stores
        .addresstype_to_addressindex_and_unspentoutpoint
        .get(outputtype)
        .unwrap();
    // The address's type_index in big-endian bytes forms the key prefix.
    let prefix = u32::from(type_index).to_be_bytes();
    // Collect outpoints (txindex, vout)
    let outpoints: Vec<(TxIndex, Vout)> = store
        .prefix(prefix)
        .map(|(key, _): (AddressIndexOutPoint, Unit)| (key.txindex(), key.vout()))
        .collect();
    // Create iterators for looking up tx data
    let mut txindex_to_txid_iter = vecs.tx.txindex_to_txid.iter()?;
    let mut txindex_to_height_iter = vecs.tx.txindex_to_height.iter()?;
    let mut txindex_to_first_txoutindex_iter = vecs.tx.txindex_to_first_txoutindex.iter()?;
    let mut txoutindex_to_value_iter = vecs.txout.txoutindex_to_value.iter()?;
    let mut height_to_blockhash_iter = vecs.block.height_to_blockhash.iter()?;
    let mut height_to_timestamp_iter = vecs.block.height_to_timestamp.iter()?;
    let utxos: Vec<Utxo> = outpoints
        .into_iter()
        .map(|(txindex, vout)| {
            let txid: Txid = txindex_to_txid_iter.get_unwrap(txindex);
            let height = txindex_to_height_iter.get_unwrap(txindex);
            // Global output index = the tx's first output index + vout.
            let first_txoutindex = txindex_to_first_txoutindex_iter.get_unwrap(txindex);
            let txoutindex = first_txoutindex + vout;
            let value: Sats = txoutindex_to_value_iter.get_unwrap(txoutindex);
            let block_hash = height_to_blockhash_iter.get_unwrap(height);
            let block_time = height_to_timestamp_iter.get_unwrap(height);
            Utxo {
                txid,
                vout,
                status: TxStatus {
                    // Everything in the unspent store is confirmed on-chain.
                    confirmed: true,
                    block_height: Some(height),
                    block_hash: Some(block_hash),
                    block_time: Some(block_time),
                },
                value,
            }
        })
        .collect();
    Ok(utxos)
}

View File

@@ -1,41 +0,0 @@
use bitcoin::hex::DisplayHex;
use brk_types::{AddressBytes, AddressValidation, OutputType};
/// Validate a Bitcoin address and return details
///
/// Invalid input yields `AddressValidation::invalid()`; valid input is
/// classified by output type, with witness version/program extracted for
/// segwit outputs.
pub fn validate_address(address: &str) -> AddressValidation {
    // Decoding failure means the address is simply invalid.
    let script = match AddressBytes::address_to_script(address) {
        Ok(script) => script,
        Err(_) => return AddressValidation::invalid(),
    };
    let output_type = OutputType::from(&script);
    let script_hex = script.as_bytes().to_lower_hex_string();
    let is_script = matches!(output_type, OutputType::P2SH);
    let is_witness = matches!(
        output_type,
        OutputType::P2WPKH | OutputType::P2WSH | OutputType::P2TR | OutputType::P2A
    );
    let mut witness_version = None;
    let mut witness_program = None;
    if is_witness {
        witness_version = script.witness_version().map(|v| v.to_num());
        // The witness program sits after the version byte and push opcode.
        if script.len() > 2 {
            witness_program = Some(script.as_bytes()[2..].to_lower_hex_string());
        }
    }
    AddressValidation {
        isvalid: true,
        address: Some(address.to_string()),
        script_pub_key: Some(script_hex),
        isscript: Some(is_script),
        iswitness: Some(is_witness),
        witness_version,
        witness_program,
    }
}

View File

@@ -1,80 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{BlockTimestamp, Date, DateIndex, Height, Timestamp};
use jiff::Timestamp as JiffTimestamp;
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
/// Get the block closest to a given timestamp using dateindex for fast lookup
///
/// Strategy: map the target timestamp to its calendar date, jump to that
/// date's first block height, then scan forward for the last block whose
/// timestamp is <= the target; the block just before the starting point is
/// checked as a fallback when the date jump overshoots.
///
/// NOTE(review): block timestamps are not guaranteed monotonic, so the
/// early `break` in the forward scan can stop before the true best match —
/// presumably an accepted approximation for this endpoint; confirm.
pub fn get_block_by_timestamp(timestamp: Timestamp, query: &Query) -> Result<BlockTimestamp> {
    let indexer = query.indexer();
    let computer = query.computer();
    let max_height = query.get_height();
    let max_height_usize: usize = max_height.into();
    if max_height_usize == 0 {
        return Err(Error::Str("No blocks indexed"));
    }
    let target = timestamp;
    let date = Date::from(target);
    // Out-of-range dates fall back to the default dateindex.
    let dateindex = DateIndex::try_from(date).unwrap_or_default();
    // Get first height of the target date
    let first_height_of_day = computer
        .indexes
        .dateindex_to_first_height
        .read_once(dateindex)
        .unwrap_or(Height::from(0usize));
    // Clamp to the tip so the scan start is always a valid height.
    let start: usize = usize::from(first_height_of_day).min(max_height_usize);
    // Use iterator for efficient sequential access
    let mut timestamp_iter = indexer.vecs.block.height_to_timestamp.iter()?;
    // Search forward from start to find the last block <= target timestamp
    let mut best_height = start;
    let mut best_ts = timestamp_iter.get_unwrap(Height::from(start));
    for h in (start + 1)..=max_height_usize {
        let height = Height::from(h);
        let block_ts = timestamp_iter.get_unwrap(height);
        if block_ts <= target {
            best_height = h;
            best_ts = block_ts;
        } else {
            break;
        }
    }
    // Check one block before start in case we need to go backward
    if start > 0 && best_ts > target {
        let prev_height = Height::from(start - 1);
        let prev_ts = timestamp_iter.get_unwrap(prev_height);
        if prev_ts <= target {
            best_height = start - 1;
            best_ts = prev_ts;
        }
    }
    let height = Height::from(best_height);
    let blockhash = indexer
        .vecs
        .block
        .height_to_blockhash
        .iter()?
        .get_unwrap(height);
    // Convert timestamp to ISO 8601 format
    let ts_secs: i64 = (*best_ts).into();
    // Fall back to the raw numeric timestamp if jiff rejects the value.
    let iso_timestamp = JiffTimestamp::from_second(ts_secs)
        .map(|t| t.to_string())
        .unwrap_or_else(|_| best_ts.to_string());
    Ok(BlockTimestamp {
        height,
        hash: blockhash,
        timestamp: iso_timestamp,
    })
}

View File

@@ -1,19 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{BlockHash, BlockHashPrefix, Height};
use crate::Query;
/// Resolve a block hash to height
pub fn get_height_by_hash(hash: &str, query: &Query) -> Result<Height> {
let indexer = query.indexer();
let blockhash: BlockHash = hash.parse().map_err(|_| Error::Str("Invalid block hash"))?;
let prefix = BlockHashPrefix::from(&blockhash);
indexer
.stores
.blockhashprefix_to_height
.get(&prefix)?
.map(|h| *h)
.ok_or(Error::Str("Block not found"))
}

View File

@@ -1,62 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{BlockInfo, Height, TxIndex};
use vecdb::{AnyVec, GenericStoredVec, VecIndex};
use crate::Query;
/// Get block info by height
///
/// # Errors
/// Fails when `height` is above the indexed tip or when any of the
/// per-height vector reads fail.
pub fn get_block_by_height(height: Height, query: &Query) -> Result<BlockInfo> {
    let indexer = query.indexer();
    let max_height = max_height(query);
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let blockhash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let difficulty = indexer.vecs.block.height_to_difficulty.read_once(height)?;
    let timestamp = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
    let weight = indexer.vecs.block.height_to_weight.read_once(height)?;
    // Tx count is derived from first-txindex deltas, not stored directly.
    let tx_count = tx_count_at_height(height, max_height, query)?;
    Ok(BlockInfo {
        id: blockhash,
        height,
        tx_count,
        size: *size,
        weight,
        timestamp,
        difficulty: *difficulty,
    })
}
/// Height of the highest indexed block: the length of the per-height
/// blockhash vector minus one (saturating so an empty index yields 0).
fn max_height(query: &Query) -> Height {
    let indexed_blocks = query.indexer().vecs.block.height_to_blockhash.len();
    Height::from(indexed_blocks.saturating_sub(1))
}
/// Number of transactions in the block at `height`.
///
/// Computed as the difference between this block's first tx index and the
/// next block's; for the tip block the total indexed tx count is used as
/// the exclusive upper bound instead.
fn tx_count_at_height(height: Height, max_height: Height, query: &Query) -> Result<u32> {
    let indexer = query.indexer();
    let computer = query.computer();
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    let next_first_txindex = if height < max_height {
        indexer
            .vecs
            .tx
            .height_to_first_txindex
            .read_once(height.incremented())?
    } else {
        TxIndex::from(computer.indexes.txindex_to_txindex.len())
    };
    // NOTE(review): the `as u32` cast assumes a per-block tx count always
    // fits in u32 — confirm (true for Bitcoin block limits).
    Ok((next_first_txindex.to_usize() - first_txindex.to_usize()) as u32)
}

View File

@@ -1,27 +0,0 @@
use brk_error::Result;
use brk_types::{BlockInfo, Height};
use crate::Query;
use super::info::get_block_by_height;
/// How many blocks a single listing request returns at most.
const DEFAULT_BLOCK_COUNT: u32 = 10;

/// Get a list of blocks, optionally starting from a specific height
///
/// Walks downward from `start_height` (clamped to the tip; defaults to
/// the tip) and returns up to [`DEFAULT_BLOCK_COUNT`] blocks, newest
/// first. The first failing lookup aborts the whole listing.
pub fn get_blocks(start_height: Option<Height>, query: &Query) -> Result<Vec<BlockInfo>> {
    let tip = query.get_height();
    let top: u32 = start_height.unwrap_or(tip).min(tip).into();
    // Never walk below height 0: cap the count at top + 1.
    let count = DEFAULT_BLOCK_COUNT.min(top + 1);
    (0..count)
        .map(|offset| get_block_by_height(Height::from(top - offset), query))
        .collect()
}

View File

@@ -1,19 +0,0 @@
mod by_timestamp;
mod height_by_hash;
mod info;
mod list;
mod raw;
mod status;
mod txid_at_index;
mod txids;
mod txs;
pub use by_timestamp::*;
pub use height_by_hash::*;
pub use info::*;
pub use list::*;
pub use raw::*;
pub use status::*;
pub use txid_at_index::*;
pub use txids::*;
pub use txs::*;

View File

@@ -1,29 +0,0 @@
use brk_error::{Error, Result};
use brk_types::Height;
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
/// Get raw block bytes by height
///
/// Reads the block's on-disk position and total size, then returns the
/// raw bytes straight from the reader.
pub fn get_block_raw(height: Height, query: &Query) -> Result<Vec<u8>> {
    let indexer = query.indexer();
    let tip = Height::from(
        indexer
            .vecs
            .block
            .height_to_blockhash
            .len()
            .saturating_sub(1),
    );
    if height > tip {
        return Err(Error::Str("Block height out of range"));
    }
    let position = query.computer().blks.height_to_position.read_once(height)?;
    let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
    query.reader().read_raw_bytes(position, *size as usize)
}

View File

@@ -1,37 +0,0 @@
use brk_error::Result;
use brk_types::{BlockStatus, Height};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
/// Get block status by height
///
/// Heights beyond the indexed tip are reported as not in the best chain.
/// For in-chain blocks the hash of the following block is included when
/// one exists (the tip has no successor).
pub fn get_block_status_by_height(height: Height, query: &Query) -> Result<BlockStatus> {
    let blockhashes = &query.indexer().vecs.block.height_to_blockhash;
    let max_height = Height::from(blockhashes.len().saturating_sub(1));
    if height > max_height {
        return Ok(BlockStatus::not_in_best_chain());
    }
    let next_best = if height < max_height {
        Some(blockhashes.read_once(height.incremented())?)
    } else {
        None
    };
    Ok(BlockStatus::in_best_chain(height, next_best))
}

View File

@@ -1,36 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{Height, TxIndex, Txid};
use vecdb::{AnyVec, GenericStoredVec, TypedVecIterator};
use crate::Query;
/// Get a single txid at a specific index within a block
///
/// # Errors
/// Fails when `height` is above the indexed tip or `index` is >= the
/// block's transaction count.
pub fn get_block_txid_at_index(height: Height, index: usize, query: &Query) -> Result<Txid> {
    let indexer = query.indexer();
    let max_height = query.get_height();
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    // For the tip block there is no next height: fall back to the total
    // number of indexed transactions as the exclusive upper bound.
    let next_first_txindex = indexer
        .vecs
        .tx
        .height_to_first_txindex
        .read_once(height.incremented())
        .unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
    let first: usize = first_txindex.into();
    let next: usize = next_first_txindex.into();
    let tx_count = next - first;
    if index >= tx_count {
        return Err(Error::Str("Transaction index out of range"));
    }
    let txindex = TxIndex::from(first + index);
    let txid = indexer.vecs.tx.txindex_to_txid.iter()?.get_unwrap(txindex);
    Ok(txid)
}

View File

@@ -1,38 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{Height, TxIndex, Txid};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
/// Get all txids in a block by height
///
/// # Errors
/// Fails when `height` is above the indexed tip.
pub fn get_block_txids(height: Height, query: &Query) -> Result<Vec<Txid>> {
    let indexer = query.indexer();
    let max_height = query.get_height();
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    // For the tip block there is no next height: use the total number of
    // indexed transactions as the exclusive upper bound instead.
    let next_first_txindex = indexer
        .vecs
        .tx
        .height_to_first_txindex
        .read_once(height.incremented())
        .unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
    let first: usize = first_txindex.into();
    let next: usize = next_first_txindex.into();
    let count = next - first;
    // Sequential scan: skip to the block's first tx, then take its range.
    let txids: Vec<Txid> = indexer
        .vecs
        .tx
        .txindex_to_txid
        .iter()?
        .skip(first)
        .take(count)
        .collect();
    Ok(txids)
}

View File

@@ -1,45 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{Height, Transaction, TxIndex};
use vecdb::{AnyVec, GenericStoredVec};
use crate::{Query, chain::tx::get_transaction_by_index};
/// Page size for block transaction listings.
pub const BLOCK_TXS_PAGE_SIZE: usize = 25;
/// Get paginated transactions in a block by height
///
/// Returns up to [`BLOCK_TXS_PAGE_SIZE`] transactions starting at
/// `start_index` within the block; an out-of-range `start_index` yields
/// an empty page rather than an error.
///
/// # Errors
/// Fails when `height` is above the indexed tip or a transaction lookup
/// fails.
pub fn get_block_txs(height: Height, start_index: usize, query: &Query) -> Result<Vec<Transaction>> {
    let indexer = query.indexer();
    let max_height = query.get_height();
    if height > max_height {
        return Err(Error::Str("Block height out of range"));
    }
    let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
    // Tip block has no successor; bound by the total indexed tx count.
    let next_first_txindex = indexer
        .vecs
        .tx
        .height_to_first_txindex
        .read_once(height.incremented())
        .unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
    let first: usize = first_txindex.into();
    let next: usize = next_first_txindex.into();
    let tx_count = next - first;
    if start_index >= tx_count {
        return Ok(Vec::new());
    }
    let end_index = (start_index + BLOCK_TXS_PAGE_SIZE).min(tx_count);
    let count = end_index - start_index;
    let mut txs = Vec::with_capacity(count);
    for i in start_index..end_index {
        let txindex = TxIndex::from(first + i);
        let tx = get_transaction_by_index(txindex, query)?;
        txs.push(tx);
    }
    Ok(txs)
}

View File

@@ -1,20 +0,0 @@
use brk_error::{Error, Result};
use brk_types::MempoolBlock;
use crate::Query;
/// Get projected mempool blocks for fee estimation
pub fn get_mempool_blocks(query: &Query) -> Result<Vec<MempoolBlock>> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
let block_stats = mempool.get_block_stats();
let blocks = block_stats
.into_iter()
.map(|stats| {
MempoolBlock::new(stats.tx_count, stats.total_vsize, stats.total_fee, stats.fee_range)
})
.collect();
Ok(blocks)
}

View File

@@ -1,11 +0,0 @@
use brk_error::{Error, Result};
use brk_types::RecommendedFees;
use crate::Query;
pub fn get_recommended_fees(query: &Query) -> Result<RecommendedFees> {
query
.mempool()
.map(|mempool| mempool.get_fees())
.ok_or(Error::MempoolNotAvailable)
}

View File

@@ -1,10 +0,0 @@
use brk_error::{Error, Result};
use brk_types::MempoolInfo;
use crate::Query;
/// Get mempool statistics
pub fn get_mempool_info(query: &Query) -> Result<MempoolInfo> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
Ok(mempool.get_info())
}

View File

@@ -1,9 +0,0 @@
mod blocks;
mod fees;
mod info;
mod txids;
pub use blocks::*;
pub use fees::*;
pub use info::*;
pub use txids::*;

View File

@@ -1,11 +0,0 @@
use brk_error::{Error, Result};
use brk_types::Txid;
use crate::Query;
/// Get all mempool transaction IDs
pub fn get_mempool_txids(query: &Query) -> Result<Vec<Txid>> {
let mempool = query.mempool().ok_or(Error::Str("Mempool not available"))?;
let txs = mempool.get_txs();
Ok(txs.keys().cloned().collect())
}

View File

@@ -1,44 +0,0 @@
use brk_error::Result;
use brk_types::{BlockFeeRatesEntry, FeeRatePercentiles, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Fee-rate percentile series per date bucket over `time_period`.
///
/// Buckets with a missing percentile value fall back to the element
/// type's default rather than being dropped.
pub fn get_block_fee_rates(
    time_period: TimePeriod,
    query: &Query,
) -> Result<Vec<BlockFeeRatesEntry>> {
    let computer = query.computer();
    let current_height = query.get_height();
    // Date-index window covering the last `time_period.block_count()` blocks.
    let start = current_height
        .to_usize()
        .saturating_sub(time_period.block_count());
    let iter = DateIndexIter::new(computer, start, current_height.to_usize());
    let vecs = &computer.chain.indexes_to_fee_rate.dateindex;
    // One sequential reader per percentile series.
    let mut min = vecs.unwrap_min().iter();
    let mut pct10 = vecs.unwrap_pct10().iter();
    let mut pct25 = vecs.unwrap_pct25().iter();
    let mut median = vecs.unwrap_median().iter();
    let mut pct75 = vecs.unwrap_pct75().iter();
    let mut pct90 = vecs.unwrap_pct90().iter();
    let mut max = vecs.unwrap_max().iter();
    Ok(iter.collect(|di, ts, h| {
        Some(BlockFeeRatesEntry {
            avg_height: h.into(),
            timestamp: *ts as u32,
            percentiles: FeeRatePercentiles::new(
                min.get(di).unwrap_or_default(),
                pct10.get(di).unwrap_or_default(),
                pct25.get(di).unwrap_or_default(),
                median.get(di).unwrap_or_default(),
                pct75.get(di).unwrap_or_default(),
                pct90.get(di).unwrap_or_default(),
                max.get(di).unwrap_or_default(),
            ),
        })
    }))
}

View File

@@ -1,32 +0,0 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Average block fees per date bucket over `time_period`.
pub fn get_block_fees(time_period: TimePeriod, query: &Query) -> Result<Vec<BlockFeesEntry>> {
    let computer = query.computer();
    let current_height = query.get_height();
    // Date-index window covering the last `time_period.block_count()` blocks.
    let start = current_height
        .to_usize()
        .saturating_sub(time_period.block_count());
    let iter = DateIndexIter::new(computer, start, current_height.to_usize());
    let mut fees = computer
        .chain
        .indexes_to_fee
        .sats
        .dateindex
        .unwrap_average()
        .iter();
    // Buckets with no fee datum are dropped (closure returns None).
    Ok(iter.collect(|di, ts, h| {
        fees.get(di).map(|fee| BlockFeesEntry {
            avg_height: h.into(),
            timestamp: *ts as u32,
            avg_fees: u64::from(*fee),
        })
    }))
}

View File

@@ -1,33 +0,0 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Average block rewards (coinbase output) per date bucket over
/// `time_period`.
pub fn get_block_rewards(time_period: TimePeriod, query: &Query) -> Result<Vec<BlockRewardsEntry>> {
    let computer = query.computer();
    let current_height = query.get_height();
    // Date-index window covering the last `time_period.block_count()` blocks.
    let start = current_height
        .to_usize()
        .saturating_sub(time_period.block_count());
    let iter = DateIndexIter::new(computer, start, current_height.to_usize());
    // coinbase = subsidy + fees
    let mut rewards = computer
        .chain
        .indexes_to_coinbase
        .sats
        .dateindex
        .unwrap_average()
        .iter();
    // Buckets with no reward datum are dropped (closure returns None).
    Ok(iter.collect(|di, ts, h| {
        rewards.get(di).map(|reward| BlockRewardsEntry {
            avg_height: h.into(),
            timestamp: *ts as u32,
            avg_rewards: u64::from(*reward),
        })
    }))
}

View File

@@ -1,62 +0,0 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Daily average block sizes and weights over `time_period`.
///
/// Both series share the same day axis; a day missing from one underlying
/// vec is simply skipped in that series only.
pub fn get_block_sizes_weights(
    time_period: TimePeriod,
    query: &Query,
) -> Result<BlockSizesWeights> {
    let computer = query.computer();
    let end = query.get_height().to_usize();
    let start = end.saturating_sub(time_period.block_count());
    let days = DateIndexIter::new(computer, start, end);
    let mut size_iter = computer
        .chain
        .indexes_to_block_size
        .dateindex
        .unwrap_average()
        .iter();
    let mut weight_iter = computer
        .chain
        .indexes_to_block_weight
        .dateindex
        .unwrap_average()
        .iter();
    // Collect one row per day, then fan out into the two entry series.
    let rows: Vec<_> = days.collect(|di, ts, h| {
        Some((
            h.into(),
            *ts as u32,
            size_iter.get(di).map(|s| u64::from(*s)),
            weight_iter.get(di).map(|w| u64::from(*w)),
        ))
    });
    let mut sizes = Vec::with_capacity(rows.len());
    let mut weights = Vec::with_capacity(rows.len());
    for (avg_height, timestamp, size, weight) in rows {
        if let Some(avg_size) = size {
            sizes.push(BlockSizeEntry {
                avg_height,
                timestamp,
                avg_size,
            });
        }
        if let Some(avg_weight) = weight {
            weights.push(BlockWeightEntry {
                avg_height,
                timestamp,
                avg_weight,
            });
        }
    }
    Ok(BlockSizesWeights { sizes, weights })
}

View File

@@ -1,120 +0,0 @@
use std::time::{SystemTime, UNIX_EPOCH};
use brk_error::Result;
use brk_types::{DifficultyAdjustment, DifficultyEpoch, Height};
use vecdb::GenericStoredVec;
use crate::Query;
/// Blocks per difficulty epoch (2 weeks target)
const BLOCKS_PER_EPOCH: u32 = 2016;
/// Target block time in seconds (10 minutes)
const TARGET_BLOCK_TIME: u64 = 600;
/// Get difficulty adjustment information
///
/// Summarizes the difficulty epoch containing the current tip: progress
/// through the 2016-block window, the estimated retarget date, the expected
/// difficulty change based on elapsed vs. target block times, and the
/// difficulty change applied at the previous retarget.
pub fn get_difficulty_adjustment(query: &Query) -> Result<DifficultyAdjustment> {
    let indexer = query.indexer();
    let computer = query.computer();
    let current_height = query.get_height();
    let current_height_u32: u32 = current_height.into();
    // Get current epoch
    let current_epoch = computer
        .indexes
        .height_to_difficultyepoch
        .read_once(current_height)?;
    let current_epoch_usize: usize = current_epoch.into();
    // Get epoch start height
    let epoch_start_height = computer
        .indexes
        .difficultyepoch_to_first_height
        .read_once(current_epoch)?;
    let epoch_start_u32: u32 = epoch_start_height.into();
    // Calculate epoch progress
    // NOTE(review): assumes epoch_start <= current_height < epoch_start + 2016
    // (i.e. the epoch index really covers the tip); otherwise these u32
    // subtractions would underflow — confirm against the index builder.
    let next_retarget_height = epoch_start_u32 + BLOCKS_PER_EPOCH;
    let blocks_into_epoch = current_height_u32 - epoch_start_u32;
    let remaining_blocks = next_retarget_height - current_height_u32;
    let progress_percent = (blocks_into_epoch as f64 / BLOCKS_PER_EPOCH as f64) * 100.0;
    // Get timestamps using difficultyepoch_to_timestamp for epoch start
    let epoch_start_timestamp = computer
        .chain
        .difficultyepoch_to_timestamp
        .read_once(current_epoch)?;
    let current_timestamp = indexer
        .vecs
        .block
        .height_to_timestamp
        .read_once(current_height)?;
    // Calculate average block time in current epoch
    // Falls back to the 10-minute target on the epoch's very first block,
    // where no intra-epoch interval exists yet.
    let elapsed_time = (*current_timestamp - *epoch_start_timestamp) as u64;
    let time_avg = if blocks_into_epoch > 0 {
        elapsed_time / blocks_into_epoch as u64
    } else {
        TARGET_BLOCK_TIME
    };
    // Estimate remaining time and retarget date
    // Wall-clock "now"; if the system clock is unavailable, fall back to the
    // tip's timestamp so the estimate degrades gracefully.
    let remaining_time = remaining_blocks as u64 * time_avg;
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(*current_timestamp as u64);
    let estimated_retarget_date = now + remaining_time;
    // Calculate expected vs actual time for difficulty change estimate
    // Faster-than-target blocks (elapsed < expected) yield a positive
    // projected difficulty increase, mirroring the consensus retarget rule.
    let expected_time = blocks_into_epoch as u64 * TARGET_BLOCK_TIME;
    let difficulty_change = if elapsed_time > 0 && blocks_into_epoch > 0 {
        ((expected_time as f64 / elapsed_time as f64) - 1.0) * 100.0
    } else {
        0.0
    };
    // Time offset from expected schedule
    let time_offset = expected_time as i64 - elapsed_time as i64;
    // Calculate previous retarget using stored difficulty values
    // Percentage change from the previous epoch's difficulty to the current
    // epoch's; 0 for the genesis epoch or a zero stored difficulty.
    let previous_retarget = if current_epoch_usize > 0 {
        let prev_epoch = DifficultyEpoch::from(current_epoch_usize - 1);
        let prev_epoch_start = computer
            .indexes
            .difficultyepoch_to_first_height
            .read_once(prev_epoch)?;
        let prev_difficulty = indexer
            .vecs
            .block
            .height_to_difficulty
            .read_once(prev_epoch_start)?;
        let curr_difficulty = indexer
            .vecs
            .block
            .height_to_difficulty
            .read_once(epoch_start_height)?;
        if *prev_difficulty > 0.0 {
            ((*curr_difficulty / *prev_difficulty) - 1.0) * 100.0
        } else {
            0.0
        }
    } else {
        0.0
    };
    Ok(DifficultyAdjustment {
        progress_percent,
        difficulty_change,
        estimated_retarget_date,
        remaining_blocks,
        remaining_time,
        previous_retarget,
        next_retarget_height: Height::from(next_retarget_height),
        time_avg,
        adjusted_time_avg: time_avg, // no separate adjustment computed; mirrors time_avg
        time_offset,
    })
}

View File

@@ -1,26 +0,0 @@
use brk_error::Result;
use brk_types::{DifficultyAdjustmentEntry, TimePeriod};
use vecdb::VecIndex;
use crate::Query;
use super::epochs::iter_difficulty_epochs;
/// Historical difficulty adjustments, newest first.
///
/// `time_period: None` means "since genesis".
pub fn get_difficulty_adjustments(
    time_period: Option<TimePeriod>,
    query: &Query,
) -> Result<Vec<DifficultyAdjustmentEntry>> {
    let end = query.get_height().to_usize();
    let start = time_period.map_or(0, |tp| end.saturating_sub(tp.block_count()));
    let mut entries = iter_difficulty_epochs(query.computer(), start, end);
    // Return in reverse chronological order (newest first)
    entries.reverse();
    Ok(entries)
}

View File

@@ -1,99 +0,0 @@
use brk_error::Result;
use brk_types::{DateIndex, DifficultyEntry, HashrateEntry, HashrateSummary, Height, TimePeriod};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
use super::epochs::iter_difficulty_epochs;
use crate::Query;
/// Get hashrate and difficulty data for a time period.
///
/// Returns daily hashrate samples (downsampled to roughly 200 points),
/// one difficulty entry per epoch boundary in the window, plus the tip's
/// current hashrate and difficulty. `time_period: None` means "since genesis".
pub fn get_hashrate(time_period: Option<TimePeriod>, query: &Query) -> Result<HashrateSummary> {
    let indexer = query.indexer();
    let computer = query.computer();
    let current_height = query.get_height();
    // Get current difficulty
    let current_difficulty = *indexer
        .vecs
        .block
        .height_to_difficulty
        .read_once(current_height)?;
    // Get current hashrate
    let current_dateindex = computer
        .indexes
        .height_to_dateindex
        .read_once(current_height)?;
    let current_hashrate = *computer
        .chain
        .indexes_to_hash_rate
        .dateindex
        .unwrap_last()
        .read_once(current_dateindex)? as u128;
    // Calculate start height based on time period
    let end = current_height.to_usize();
    let start = match time_period {
        Some(tp) => end.saturating_sub(tp.block_count()),
        None => 0,
    };
    // Get hashrate entries using iterators for efficiency
    let start_dateindex = computer
        .indexes
        .height_to_dateindex
        .read_once(Height::from(start))?;
    let end_dateindex = current_dateindex;
    // Sample at regular intervals to avoid too many data points
    let total_days = end_dateindex
        .to_usize()
        .saturating_sub(start_dateindex.to_usize())
        + 1;
    let step = (total_days / 200).max(1); // Max ~200 data points
    // Create iterators for the loop
    let mut hashrate_iter = computer
        .chain
        .indexes_to_hash_rate
        .dateindex
        .unwrap_last()
        .iter();
    let mut timestamp_iter = computer
        .chain
        .timeindexes_to_timestamp
        .dateindex_extra
        .unwrap_first()
        .iter();
    let mut hashrates = Vec::with_capacity(total_days / step + 1);
    // Walk dateindexes start..=end in `step` increments; days missing either
    // a hashrate or a timestamp are skipped rather than reported as zero.
    let mut di = start_dateindex.to_usize();
    while di <= end_dateindex.to_usize() {
        let dateindex = DateIndex::from(di);
        if let (Some(hr), Some(timestamp)) =
            (hashrate_iter.get(dateindex), timestamp_iter.get(dateindex))
        {
            hashrates.push(HashrateEntry {
                timestamp,
                avg_hashrate: (*hr) as u128,
            });
        }
        di += step;
    }
    // Get difficulty adjustments within the period
    let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
        .into_iter()
        .map(|e| DifficultyEntry {
            timestamp: e.timestamp,
            difficulty: e.difficulty,
            height: e.height,
        })
        .collect();
    Ok(HashrateSummary {
        hashrates,
        difficulty,
        current_hashrate,
        current_difficulty,
    })
}

View File

@@ -1,21 +0,0 @@
mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes_weights;
mod dateindex_iter;
mod difficulty;
mod difficulty_adjustments;
mod epochs;
mod hashrate;
mod pools;
mod reward_stats;
pub use block_fee_rates::*;
pub use block_fees::*;
pub use block_rewards::*;
pub use block_sizes_weights::*;
pub use difficulty::*;
pub use difficulty_adjustments::*;
pub use hashrate::*;
pub use pools::*;
pub use reward_stats::*;

View File

@@ -1,172 +0,0 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, GenericStoredVec, IterableVec, VecIndex};
use crate::Query;
/// Get mining pool statistics for a time period using pre-computed cumulative counts.
///
/// For each pool, blocks mined in the window (start, tip] are derived as
/// cumulative(tip) - cumulative(start - 1); pools with zero blocks in the
/// window are omitted. Results are ranked by block count, descending.
pub fn get_mining_pools(time_period: TimePeriod, query: &Query) -> Result<PoolsSummary> {
    let computer = query.computer();
    let current_height = query.get_height();
    let end = current_height.to_usize();
    // No blocks indexed yet
    if computer.pools.height_to_pool.len() == 0 {
        return Ok(PoolsSummary {
            pools: vec![],
            block_count: 0,
            last_estimated_hashrate: 0,
        });
    }
    // Calculate start height based on time period
    let start = end.saturating_sub(time_period.block_count());
    let pools = pools();
    let mut pool_data: Vec<(&'static brk_types::Pool, u32)> = Vec::new();
    // For each pool, get cumulative count at end and start, subtract to get range count
    for (pool_id, pool_vecs) in &computer.pools.vecs {
        let mut cumulative = pool_vecs
            .indexes_to_blocks_mined
            .height_extra
            .unwrap_cumulative()
            .iter();
        let count_at_end: u32 = *cumulative.get(current_height).unwrap_or_default();
        // cumulative(start - 1) counts blocks strictly before the window;
        // when the window reaches genesis there is nothing to exclude.
        let count_at_start: u32 = if start == 0 {
            0
        } else {
            *cumulative.get(Height::from(start - 1)).unwrap_or_default()
        };
        let block_count = count_at_end.saturating_sub(count_at_start);
        // Only include pools that mined at least one block in the period
        if block_count > 0 {
            pool_data.push((pools.get(*pool_id), block_count));
        }
    }
    // Sort by block count descending
    pool_data.sort_by(|a, b| b.1.cmp(&a.1));
    let total_blocks: u32 = pool_data.iter().map(|(_, count)| count).sum();
    // Build stats with ranks
    // Rank is 1-based position in the sorted order; share is the pool's
    // fraction of all attributed blocks in the window.
    let pool_stats: Vec<PoolStats> = pool_data
        .into_iter()
        .enumerate()
        .map(|(idx, (pool, block_count))| {
            let share = if total_blocks > 0 {
                block_count as f64 / total_blocks as f64
            } else {
                0.0
            };
            PoolStats::new(pool, block_count, (idx + 1) as u32, share)
        })
        .collect();
    // TODO: Calculate actual hashrate from difficulty
    let last_estimated_hashrate = 0u128;
    Ok(PoolsSummary {
        pools: pool_stats,
        block_count: total_blocks,
        last_estimated_hashrate,
    })
}
/// Get the list of every known mining pool, without any statistics.
pub fn get_all_pools() -> Vec<PoolInfo> {
    let mut infos = Vec::new();
    for pool in pools().iter() {
        infos.push(PoolInfo::from(pool));
    }
    infos
}
/// Get detailed information about a specific pool by slug.
///
/// Block counts come from the pre-computed cumulative "blocks mined" vec:
/// blocks mined in a window (start, tip] = cumulative(tip) - cumulative(start - 1).
/// The 24h and 1w windows are approximated as 144 and 1008 blocks
/// (one block per 10 minutes).
pub fn get_pool_detail(slug: PoolSlug, query: &Query) -> Result<PoolDetail> {
    let computer = query.computer();
    let current_height = query.get_height();
    let end = current_height.to_usize();
    let pools_list = pools();
    let pool = pools_list.get(slug);
    // Get pool vecs for this specific pool
    let pool_vecs = computer
        .pools
        .vecs
        .get(&slug)
        .ok_or_else(|| Error::Str("Pool data not found"))?;
    let mut cumulative = pool_vecs
        .indexes_to_blocks_mined
        .height_extra
        .unwrap_cumulative()
        .iter();
    // Get total blocks (all time)
    let total_all: u32 = *cumulative.get(current_height).unwrap_or_default();
    // Cumulative count strictly before `start`; zero when the window already
    // reaches genesis. Shared by the 24h and 1w windows (previously duplicated).
    let mut count_before = |start: usize| -> u32 {
        if start == 0 {
            0
        } else {
            *cumulative.get(Height::from(start - 1)).unwrap_or_default()
        }
    };
    let start_24h = end.saturating_sub(144);
    let start_1w = end.saturating_sub(1008);
    let total_24h = total_all.saturating_sub(count_before(start_24h));
    let total_1w = total_all.saturating_sub(count_before(start_1w));
    // Calculate total network blocks for share calculation.
    // Heights are 0-based, so a window [start, end] spans end - start + 1 blocks.
    let network_blocks_all = (end + 1) as u32;
    let network_blocks_24h = (end - start_24h + 1) as u32;
    let network_blocks_1w = (end - start_1w + 1) as u32;
    // Fraction of the network's blocks this pool mined in a window.
    let share = |mined: u32, network: u32| -> f64 {
        if network > 0 {
            mined as f64 / network as f64
        } else {
            0.0
        }
    };
    Ok(PoolDetail {
        pool: PoolDetailInfo::from(pool),
        block_count: PoolBlockCounts {
            all: total_all,
            day: total_24h,
            week: total_1w,
        },
        block_share: PoolBlockShares {
            all: share(total_all, network_blocks_all),
            day: share(total_24h, network_blocks_24h),
            week: share(total_1w, network_blocks_1w),
        },
        estimated_hashrate: 0, // TODO: Calculate from share and network hashrate
        reported_hashrate: None,
    })
}

View File

@@ -1,58 +0,0 @@
use brk_error::Result;
use brk_types::{Height, RewardStats, Sats};
use vecdb::{IterableVec, VecIndex};
use crate::Query;
/// Aggregate coinbase reward, fee, and transaction-count totals over the
/// last `block_count` blocks, tip inclusive.
///
/// A `block_count` of 0 is treated like 1 (the tip block alone).
pub fn get_reward_stats(block_count: usize, query: &Query) -> Result<RewardStats> {
    let computer = query.computer();
    let current_height = query.get_height();
    let end_block = current_height;
    // Saturate both subtractions: the previous `block_count - 1` underflowed
    // when block_count == 0 (panic in debug; wrap to usize::MAX — and thus a
    // full-chain scan — in release). Windows larger than the chain clamp to
    // genesis as before.
    let start_block = Height::from(
        current_height
            .to_usize()
            .saturating_sub(block_count.saturating_sub(1)),
    );
    let mut coinbase_iter = computer
        .chain
        .indexes_to_coinbase
        .sats
        .height
        .as_ref()
        .unwrap()
        .iter();
    let mut fee_iter = computer.chain.indexes_to_fee.sats.height.unwrap_sum().iter();
    let mut tx_count_iter = computer
        .chain
        .indexes_to_tx_count
        .height
        .as_ref()
        .unwrap()
        .iter();
    let mut total_reward = Sats::ZERO;
    let mut total_fee = Sats::ZERO;
    let mut total_tx: u64 = 0;
    // Sum the per-height values across the window; heights missing from any
    // vec are simply skipped for that metric.
    for height in start_block.to_usize()..=end_block.to_usize() {
        let h = Height::from(height);
        if let Some(coinbase) = coinbase_iter.get(h) {
            total_reward += Sats::from(u64::from(*coinbase));
        }
        if let Some(fee) = fee_iter.get(h) {
            total_fee += Sats::from(u64::from(*fee));
        }
        if let Some(tx_count) = tx_count_iter.get(h) {
            total_tx += u64::from(*tx_count);
        }
    }
    Ok(RewardStats {
        start_block,
        end_block,
        total_reward,
        total_fee,
        total_tx,
    })
}

View File

@@ -1,11 +0,0 @@
mod addr;
mod block;
mod mempool;
mod mining;
mod tx;
pub use addr::*;
pub use block::*;
pub use mempool::*;
pub use mining::*;
pub use tx::*;

View File

@@ -1,50 +0,0 @@
use std::str::FromStr;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_types::{TxIndex, Txid, TxidPath, TxidPrefix};
use vecdb::GenericStoredVec;
use crate::Query;
pub fn get_transaction_hex(TxidPath { txid }: TxidPath, query: &Query) -> Result<String> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = query.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.hex().to_string());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = query.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
get_transaction_hex_by_index(txindex, query)
}
/// Read a confirmed transaction's raw bytes straight out of the blk files
/// and return them hex-encoded (lowercase).
pub fn get_transaction_hex_by_index(txindex: TxIndex, query: &Query) -> Result<String> {
    let indexer = query.indexer();
    // Serialized length and on-disk position of the transaction.
    let size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
    let position = query.computer().blks.txindex_to_position.read_once(txindex)?;
    let raw = query.reader().read_raw_bytes(position, *size as usize)?;
    Ok(raw.to_lower_hex_string())
}

View File

@@ -1,9 +0,0 @@
mod hex;
mod outspend;
mod status;
mod tx;
pub use hex::*;
pub use outspend::*;
pub use status::*;
pub use tx::*;

View File

@@ -1,174 +0,0 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{TxInIndex, TxOutspend, TxStatus, Txid, TxidPath, TxidPrefix, Vin, Vout};
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
/// Get the spend status of a specific output
///
/// Resolves `txid:vout` to a global txout index, then consults the
/// txout -> txin spend map. Mempool transactions are reported unspent,
/// since their outputs cannot be spent on-chain yet.
pub fn get_tx_outspend(
    TxidPath { txid }: TxidPath,
    vout: Vout,
    query: &Query,
) -> Result<TxOutspend> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // Mempool outputs are unspent in on-chain terms
    if let Some(mempool) = query.mempool()
        && mempool.get_txs().contains_key(&txid)
    {
        return Ok(TxOutspend::UNSPENT);
    }
    // Look up confirmed transaction
    let prefix = TxidPrefix::from(&txid);
    let indexer = query.indexer();
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    // Calculate txoutindex
    // Outputs are stored contiguously per transaction, so the global index
    // is the tx's first output index plus the requested vout.
    let first_txoutindex = indexer
        .vecs
        .tx
        .txindex_to_first_txoutindex
        .read_once(txindex)?;
    let txoutindex = first_txoutindex + vout;
    // Look up spend status
    // TxInIndex::UNSPENT is the sentinel for "no input spends this output".
    let computer = query.computer();
    let txinindex = computer
        .stateful
        .txoutindex_to_txinindex
        .read_once(txoutindex)?;
    if txinindex == TxInIndex::UNSPENT {
        return Ok(TxOutspend::UNSPENT);
    }
    get_outspend_details(txinindex, query)
}
/// Get the spend status of all outputs in a transaction
///
/// Returns one `TxOutspend` per output, in vout order. Mempool transactions
/// report every output unspent. For confirmed transactions the output count
/// is derived from the gap between this tx's first output index and the
/// next tx's.
pub fn get_tx_outspends(TxidPath { txid }: TxidPath, query: &Query) -> Result<Vec<TxOutspend>> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // Mempool outputs are unspent in on-chain terms
    if let Some(mempool) = query.mempool()
        && let Some(tx_with_hex) = mempool.get_txs().get(&txid)
    {
        let output_count = tx_with_hex.tx().output.len();
        return Ok(vec![TxOutspend::UNSPENT; output_count]);
    }
    // Look up confirmed transaction
    let prefix = TxidPrefix::from(&txid);
    let indexer = query.indexer();
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    // Get output range
    // NOTE(review): assumes txindex.incremented() is always readable, i.e.
    // a sentinel entry exists past the last transaction — confirm in indexer.
    let first_txoutindex = indexer
        .vecs
        .tx
        .txindex_to_first_txoutindex
        .read_once(txindex)?;
    let next_first_txoutindex = indexer
        .vecs
        .tx
        .txindex_to_first_txoutindex
        .read_once(txindex.incremented())?;
    let output_count = usize::from(next_first_txoutindex) - usize::from(first_txoutindex);
    // Get spend status for each output
    // A single sequential iterator over the spend map serves all vouts.
    let computer = query.computer();
    let mut txoutindex_to_txinindex_iter = computer.stateful.txoutindex_to_txinindex.iter()?;
    let mut outspends = Vec::with_capacity(output_count);
    for i in 0..output_count {
        let txoutindex = first_txoutindex + Vout::from(i);
        let txinindex = txoutindex_to_txinindex_iter.get_unwrap(txoutindex);
        if txinindex == TxInIndex::UNSPENT {
            outspends.push(TxOutspend::UNSPENT);
        } else {
            outspends.push(get_outspend_details(txinindex, query)?);
        }
    }
    Ok(outspends)
}
/// Get spending transaction details from a txinindex
///
/// Given the global index of the input that spends an output, resolves the
/// spending transaction's txid, the input's position (vin) within it, and
/// the confirming block's height/hash/time.
fn get_outspend_details(txinindex: TxInIndex, query: &Query) -> Result<TxOutspend> {
    let indexer = query.indexer();
    // Look up spending txindex directly
    let spending_txindex = indexer
        .vecs
        .txin
        .txinindex_to_txindex
        .read_once(txinindex)?;
    // Calculate vin
    // Inputs are stored contiguously per transaction, so vin is the offset
    // from the spending tx's first input index.
    let spending_first_txinindex = indexer
        .vecs
        .tx
        .txindex_to_first_txinindex
        .read_once(spending_txindex)?;
    let vin = Vin::from(usize::from(txinindex) - usize::from(spending_first_txinindex));
    // Get spending tx details
    let spending_txid = indexer
        .vecs
        .tx
        .txindex_to_txid
        .read_once(spending_txindex)?;
    let spending_height = indexer
        .vecs
        .tx
        .txindex_to_height
        .read_once(spending_txindex)?;
    let block_hash = indexer
        .vecs
        .block
        .height_to_blockhash
        .read_once(spending_height)?;
    let block_time = indexer
        .vecs
        .block
        .height_to_timestamp
        .read_once(spending_height)?;
    Ok(TxOutspend {
        spent: true,
        txid: Some(spending_txid),
        vin: Some(vin),
        status: Some(TxStatus {
            confirmed: true,
            block_height: Some(spending_height),
            block_hash: Some(block_hash),
            block_time: Some(block_time),
        }),
    })
}

View File

@@ -1,46 +0,0 @@
use std::str::FromStr;
use brk_error::{Error, Result};
use brk_types::{TxStatus, Txid, TxidPath, TxidPrefix};
use vecdb::GenericStoredVec;
use crate::Query;
pub fn get_transaction_status(TxidPath { txid }: TxidPath, query: &Query) -> Result<TxStatus> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = query.mempool()
&& mempool.get_txs().contains_key(&txid)
{
return Ok(TxStatus::UNCONFIRMED);
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = query.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Get block info for status
let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
Ok(TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
})
}

View File

@@ -1,166 +0,0 @@
use std::{io::Cursor, str::FromStr};
use bitcoin::consensus::Decodable;
use brk_error::{Error, Result};
use brk_types::{
Sats, Transaction, TxIn, TxIndex, TxOut, TxStatus, Txid, TxidPath, TxidPrefix, Vout, Weight,
};
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
pub fn get_transaction(TxidPath { txid }: TxidPath, query: &Query) -> Result<Transaction> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = query.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.tx().clone());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = query.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
get_transaction_by_index(txindex, query)
}
/// Build a full `Transaction` for a confirmed transaction index.
///
/// Reads metadata from the index vecs, decodes the raw bytes from the blk
/// files, and enriches each input with its prevout txid/vout/value. The
/// prevout `script_pubkey` is a placeholder (empty script), so sigop counts
/// that depend on prevout scripts are approximate.
pub fn get_transaction_by_index(txindex: TxIndex, query: &Query) -> Result<Transaction> {
    let indexer = query.indexer();
    let reader = query.reader();
    let computer = query.computer();
    // Get tx metadata using read_once for single lookups
    let txid = indexer.vecs.tx.txindex_to_txid.read_once(txindex)?;
    let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
    let version = indexer.vecs.tx.txindex_to_txversion.read_once(txindex)?;
    let lock_time = indexer.vecs.tx.txindex_to_rawlocktime.read_once(txindex)?;
    let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
    let first_txinindex = indexer
        .vecs
        .tx
        .txindex_to_first_txinindex
        .read_once(txindex)?;
    let position = computer.blks.txindex_to_position.read_once(txindex)?;
    // Get block info for status
    let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    // Read and decode the raw transaction from blk file
    let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
    let mut cursor = Cursor::new(buffer);
    let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
        .map_err(|_| Error::Str("Failed to decode transaction"))?;
    // For iterating through inputs, we need iterators (multiple lookups)
    let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
    let mut txindex_to_first_txoutindex_iter =
        indexer.vecs.tx.txindex_to_first_txoutindex.iter()?;
    let mut txinindex_to_outpoint_iter = indexer.vecs.txin.txinindex_to_outpoint.iter()?;
    let mut txoutindex_to_value_iter = indexer.vecs.txout.txoutindex_to_value.iter()?;
    // Build inputs with prevout information
    // Inputs are stored contiguously, so input i has global index
    // first_txinindex + i; its outpoint links back to the funding output.
    let input: Vec<TxIn> = tx
        .input
        .iter()
        .enumerate()
        .map(|(i, txin)| {
            let txinindex = first_txinindex + i;
            let outpoint = txinindex_to_outpoint_iter.get_unwrap(txinindex);
            let is_coinbase = outpoint.is_coinbase();
            // Get prevout info if not coinbase
            let (prev_txid, prev_vout, prevout) = if is_coinbase {
                (Txid::COINBASE, Vout::MAX, None)
            } else {
                let prev_txindex = outpoint.txindex();
                let prev_vout = outpoint.vout();
                let prev_txid = txindex_to_txid_iter.get_unwrap(prev_txindex);
                // Calculate the txoutindex for the prevout
                let prev_first_txoutindex =
                    txindex_to_first_txoutindex_iter.get_unwrap(prev_txindex);
                let prev_txoutindex = prev_first_txoutindex + prev_vout;
                // Get the value of the prevout
                let prev_value = txoutindex_to_value_iter.get_unwrap(prev_txoutindex);
                // We don't have the script_pubkey stored directly, so we need to reconstruct
                // For now, we'll get it from the decoded transaction's witness/scriptsig
                // which can reveal the prevout script type, but the actual script needs
                // to be fetched from the spending tx or reconstructed from address bytes
                let prevout = Some(TxOut::from((
                    bitcoin::ScriptBuf::new(), // Placeholder - would need to reconstruct
                    prev_value,
                )));
                (prev_txid, prev_vout, prevout)
            };
            TxIn {
                txid: prev_txid,
                vout: prev_vout,
                prevout,
                script_sig: txin.script_sig.clone(),
                script_sig_asm: (), // asm rendering not populated here
                is_coinbase,
                sequence: txin.sequence.0,
                inner_redeem_script_asm: (), // asm rendering not populated here
            }
        })
        .collect();
    // Calculate weight before consuming tx.output
    let weight = Weight::from(tx.weight());
    // Calculate sigop cost
    // Note: Using |_| None means P2SH and SegWit sigops won't be counted accurately
    // since we don't provide the prevout scripts. This matches mempool tx behavior.
    // For accurate counting, we'd need to reconstruct prevout scripts from indexed data.
    let total_sigop_cost = tx.total_sigop_cost(|_| None);
    // Build outputs
    let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
    // Build status
    let status = TxStatus {
        confirmed: true,
        block_height: Some(height),
        block_hash: Some(block_hash),
        block_time: Some(block_time),
    };
    let mut transaction = Transaction {
        index: Some(txindex),
        txid,
        version,
        lock_time,
        total_size: *total_size as usize,
        weight,
        total_sigop_cost,
        fee: Sats::ZERO, // Will be computed below
        input,
        output,
        status,
    };
    // Compute fee from inputs - outputs
    transaction.compute_fee();
    Ok(transaction)
}

View File

@@ -1,52 +0,0 @@
use serde::{Deserialize, Deserializer};
use serde_json::Value;
pub fn de_unquote_i64<'de, D>(deserializer: D) -> Result<Option<i64>, D::Error>
where
D: Deserializer<'de>,
{
let value: Option<Value> = Option::deserialize(deserializer)?;
if value.is_none() {
return Ok(None);
}
let value = value.unwrap();
if let Some(mut s) = value.as_str().map(|s| s.to_string()) {
if s.starts_with('"') && s.ends_with('"') && s.len() >= 2 {
s = s[1..s.len() - 1].to_string();
}
s.parse::<i64>().map(Some).map_err(serde::de::Error::custom)
} else if let Some(n) = value.as_i64() {
Ok(Some(n))
} else {
Err(serde::de::Error::custom("expected a string or number"))
}
}
pub fn de_unquote_usize<'de, D>(deserializer: D) -> Result<Option<usize>, D::Error>
where
D: Deserializer<'de>,
{
let value: Option<Value> = Option::deserialize(deserializer)?;
if value.is_none() {
return Ok(None);
}
let value = value.unwrap();
if let Some(mut s) = value.as_str().map(|s| s.to_string()) {
if s.starts_with('"') && s.ends_with('"') && s.len() >= 2 {
s = s[1..s.len() - 1].to_string();
}
s.parse::<usize>()
.map(Some)
.map_err(serde::de::Error::custom)
} else if let Some(n) = value.as_u64() {
Ok(Some(n as usize))
} else {
Err(serde::de::Error::custom("expected a string or number"))
}
}

View File

@@ -0,0 +1,235 @@
use std::str::FromStr;
use bitcoin::{Network, PublicKey, ScriptBuf};
use brk_error::{Error, Result};
use brk_types::{
Address, AddressBytes, AddressChainStats, AddressHash, AddressIndexOutPoint,
AddressIndexTxIndex, AddressStats, AnyAddressDataIndexEnum, OutputType, Sats, TxIndex,
TxStatus, Txid, TypeIndex, Unit, Utxo, Vout,
};
use vecdb::TypedVecIterator;
use crate::Query;
/// Maximum number of mempool txids to return
const MAX_MEMPOOL_TXIDS: usize = 50;
impl Query {
pub fn address(&self, Address { address }: Address) -> Result<AddressStats> {
let indexer = self.indexer();
let computer = self.computer();
let stores = &indexer.stores;
let script = if let Ok(address) = bitcoin::Address::from_str(&address) {
if !address.is_valid_for_network(Network::Bitcoin) {
return Err(Error::InvalidNetwork);
}
let address = address.assume_checked();
address.script_pubkey()
} else if let Ok(pubkey) = PublicKey::from_str(&address) {
ScriptBuf::new_p2pk(&pubkey)
} else {
return Err(Error::InvalidAddress);
};
let outputtype = OutputType::from(&script);
let Ok(bytes) = AddressBytes::try_from((&script, outputtype)) else {
return Err(Error::Str("Failed to convert the address to bytes"));
};
let addresstype = outputtype;
let hash = AddressHash::from(&bytes);
let Ok(Some(type_index)) = stores
.addresstype_to_addresshash_to_addressindex
.get(addresstype)
.unwrap()
.get(&hash)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownAddress);
};
let any_address_index = computer
.stateful
.any_address_indexes
.get_anyaddressindex_once(outputtype, type_index)?;
let address_data = match any_address_index.to_enum() {
AnyAddressDataIndexEnum::Loaded(index) => computer
.stateful
.addresses_data
.loaded
.iter()?
.get_unwrap(index),
AnyAddressDataIndexEnum::Empty(index) => computer
.stateful
.addresses_data
.empty
.iter()?
.get_unwrap(index)
.into(),
};
Ok(AddressStats {
address: address.into(),
chain_stats: AddressChainStats {
type_index,
funded_txo_count: address_data.funded_txo_count,
funded_txo_sum: address_data.received,
spent_txo_count: address_data.spent_txo_count,
spent_txo_sum: address_data.sent,
tx_count: address_data.tx_count,
},
mempool_stats: self.mempool().map(|mempool| {
mempool
.get_addresses()
.get(&bytes)
.map(|(stats, _)| stats)
.cloned()
.unwrap_or_default()
}),
})
}
pub fn address_txids(
&self,
address: Address,
after_txid: Option<Txid>,
limit: usize,
) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let stores = &indexer.stores;
let (outputtype, type_index) = self.resolve_address(&address)?;
let store = stores
.addresstype_to_addressindex_and_txindex
.get(outputtype)
.unwrap();
let prefix = u32::from(type_index).to_be_bytes();
let after_txindex = if let Some(after_txid) = after_txid {
let txindex = stores
.txidprefix_to_txindex
.get(&after_txid.into())
.map_err(|_| Error::Str("Failed to look up after_txid"))?
.ok_or(Error::Str("after_txid not found"))?
.into_owned();
Some(txindex)
} else {
None
};
let txindices: Vec<TxIndex> = store
.prefix(prefix)
.rev()
.filter(|(key, _): &(AddressIndexTxIndex, Unit)| {
if let Some(after) = after_txindex {
key.txindex() < after
} else {
true
}
})
.take(limit)
.map(|(key, _)| key.txindex())
.collect();
let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
let txids: Vec<Txid> = txindices
.into_iter()
.map(|txindex| txindex_to_txid_iter.get_unwrap(txindex))
.collect();
Ok(txids)
}
/// Collects the unspent outputs (UTXOs) belonging to `address`.
///
/// Resolves the address to its `(OutputType, TypeIndex)` pair, scans the
/// per-address unspent-outpoint store under that prefix, then enriches each
/// outpoint with its value and confirmation metadata from the indexed vecs.
pub fn address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
    let indexer = self.indexer();
    let stores = &indexer.stores;
    let vecs = &indexer.vecs;
    let (outputtype, type_index) = self.resolve_address(&address)?;
    let store = stores
        .addresstype_to_addressindex_and_unspentoutpoint
        .get(outputtype)
        .unwrap();
    // Every key for this address shares the big-endian type-index prefix.
    let prefix = u32::from(type_index).to_be_bytes();
    let outpoints: Vec<(TxIndex, Vout)> = store
        .prefix(prefix)
        .map(|(key, _): (AddressIndexOutPoint, Unit)| (key.txindex(), key.vout()))
        .collect();
    // Sequential iterators over the indexed columns consulted per outpoint.
    let mut txid_iter = vecs.tx.txindex_to_txid.iter()?;
    let mut height_iter = vecs.tx.txindex_to_height.iter()?;
    let mut first_txoutindex_iter = vecs.tx.txindex_to_first_txoutindex.iter()?;
    let mut value_iter = vecs.txout.txoutindex_to_value.iter()?;
    let mut blockhash_iter = vecs.block.height_to_blockhash.iter()?;
    let mut blocktime_iter = vecs.block.height_to_timestamp.iter()?;
    let mut utxos = Vec::with_capacity(outpoints.len());
    for (txindex, vout) in outpoints {
        let txid: Txid = txid_iter.get_unwrap(txindex);
        let height = height_iter.get_unwrap(txindex);
        let first_txoutindex = first_txoutindex_iter.get_unwrap(txindex);
        // Output's global index = first output of the tx + its vout offset.
        let value: Sats = value_iter.get_unwrap(first_txoutindex + vout);
        utxos.push(Utxo {
            txid,
            vout,
            status: TxStatus {
                confirmed: true,
                block_height: Some(height),
                block_hash: Some(blockhash_iter.get_unwrap(height)),
                block_time: Some(blocktime_iter.get_unwrap(height)),
            },
            value,
        });
    }
    Ok(utxos)
}
pub fn address_mempool_txids(&self, address: Address) -> Result<Vec<Txid>> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
let bytes = AddressBytes::from_str(&address.address)?;
let addresses = mempool.get_addresses();
let txids: Vec<Txid> = addresses
.get(&bytes)
.map(|(_, txids)| txids.iter().take(MAX_MEMPOOL_TXIDS).cloned().collect())
.unwrap_or_default();
Ok(txids)
}
/// Resolve an address string to its output type and type_index.
///
/// Parses the address into raw bytes, derives its script/output type and
/// hash, then consults the per-type `hash -> address index` store.
///
/// # Errors
/// `Error::UnknownAddress` when the address is absent — note that store read
/// *errors* are collapsed into the same variant by the `let Ok(Some(..))`
/// pattern.
fn resolve_address(&self, address: &Address) -> Result<(OutputType, TypeIndex)> {
    let stores = &self.indexer().stores;
    let bytes = AddressBytes::from_str(&address.address)?;
    let outputtype = OutputType::from(&bytes);
    let hash = AddressHash::from(&bytes);
    // Outer store is keyed by output type and assumed present for every
    // known type, hence the unwrap.
    let Ok(Some(type_index)) = stores
        .addresstype_to_addresshash_to_addressindex
        .get(outputtype)
        .unwrap()
        .get(&hash)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownAddress);
    };
    Ok((outputtype, type_index))
}
}

View File

@@ -0,0 +1,103 @@
use brk_error::{Error, Result};
use brk_types::{BlockHash, BlockHashPrefix, BlockInfo, Height, TxIndex};
use vecdb::{AnyVec, GenericStoredVec, VecIndex};
use crate::Query;
/// Number of blocks returned by `blocks()` per call.
const DEFAULT_BLOCK_COUNT: u32 = 10;
impl Query {
    /// Looks up a block summary by its hash (hex string).
    pub fn block(&self, hash: &str) -> Result<BlockInfo> {
        let height = self.height_by_hash(hash)?;
        self.block_by_height(height)
    }
    /// Builds a `BlockInfo` for the block at `height`.
    ///
    /// # Errors
    /// Fails when `height` is beyond the highest indexed block, or when any
    /// per-height column read fails.
    pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> {
        let indexer = self.indexer();
        let max_height = self.max_height();
        if height > max_height {
            return Err(Error::Str("Block height out of range"));
        }
        let blockhash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
        let difficulty = indexer.vecs.block.height_to_difficulty.read_once(height)?;
        let timestamp = indexer.vecs.block.height_to_timestamp.read_once(height)?;
        let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
        let weight = indexer.vecs.block.height_to_weight.read_once(height)?;
        let tx_count = self.tx_count_at_height(height, max_height)?;
        Ok(BlockInfo {
            id: blockhash,
            height,
            tx_count,
            size: *size,
            weight,
            timestamp,
            difficulty: *difficulty,
        })
    }
    /// Returns up to `DEFAULT_BLOCK_COUNT` block summaries, walking backwards
    /// from `start_height` (or the chain tip when `None`).
    // NOTE(review): this clamps against `self.height()` while
    // `block_by_height` validates against the vec-derived `max_height()`;
    // confirm both always report the same tip.
    pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
        let max_height = self.height();
        let start = start_height.unwrap_or(max_height);
        let start = start.min(max_height);
        let start_u32: u32 = start.into();
        // Never request more blocks than exist at or below `start`.
        let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1);
        let mut blocks = Vec::with_capacity(count as usize);
        for i in 0..count {
            let height = Height::from(start_u32 - i);
            blocks.push(self.block_by_height(height)?);
        }
        Ok(blocks)
    }
    // === Helper methods ===
    /// Resolves a block-hash hex string to its height via the prefix store.
    ///
    /// # Errors
    /// `"Invalid block hash"` for unparseable input, `"Block not found"` when
    /// the prefix is absent from the store.
    pub fn height_by_hash(&self, hash: &str) -> Result<Height> {
        let indexer = self.indexer();
        let blockhash: BlockHash = hash.parse().map_err(|_| Error::Str("Invalid block hash"))?;
        let prefix = BlockHashPrefix::from(&blockhash);
        indexer
            .stores
            .blockhashprefix_to_height
            .get(&prefix)?
            .map(|h| *h)
            .ok_or(Error::Str("Block not found"))
    }
    /// Highest indexed height: number of stored block hashes minus one.
    fn max_height(&self) -> Height {
        Height::from(
            self.indexer()
                .vecs
                .block
                .height_to_blockhash
                .len()
                .saturating_sub(1),
        )
    }
    /// Transaction count of the block at `height`, derived from the gap
    /// between consecutive `height_to_first_txindex` entries.
    fn tx_count_at_height(&self, height: Height, max_height: Height) -> Result<u32> {
        let indexer = self.indexer();
        let computer = self.computer();
        let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
        let next_first_txindex = if height < max_height {
            indexer
                .vecs
                .tx
                .height_to_first_txindex
                .read_once(height.incremented())?
        } else {
            // Tip block has no successor entry; use the total tx count.
            TxIndex::from(computer.indexes.txindex_to_txindex.len())
        };
        Ok((next_first_txindex.to_usize() - first_txindex.to_usize()) as u32)
    }
}

View File

@@ -0,0 +1,7 @@
mod info;
mod raw;
mod status;
mod timestamp;
mod txs;
pub const BLOCK_TXS_PAGE_SIZE: usize = 25;

View File

@@ -0,0 +1,35 @@
use brk_error::{Error, Result};
use brk_types::Height;
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
impl Query {
    /// Fetches the raw serialized bytes of the block identified by `hash`.
    pub fn block_raw(&self, hash: &str) -> Result<Vec<u8>> {
        self.block_raw_by_height(self.height_by_hash(hash)?)
    }
    /// Reads the raw block at `height` straight from the blk files, using the
    /// computed file position and the indexed total size.
    fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> {
        let indexer = self.indexer();
        // Highest indexed height = number of stored block hashes - 1.
        let tip = Height::from(
            indexer
                .vecs
                .block
                .height_to_blockhash
                .len()
                .saturating_sub(1),
        );
        if height > tip {
            return Err(Error::Str("Block height out of range"));
        }
        let position = self.computer().blks.height_to_position.read_once(height)?;
        let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
        self.reader().read_raw_bytes(position, *size as usize)
    }
}

View File

@@ -0,0 +1,43 @@
use brk_error::Result;
use brk_types::{BlockStatus, Height};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
impl Query {
    /// Resolves a block hash to its chain status.
    pub fn block_status(&self, hash: &str) -> Result<BlockStatus> {
        let height = self.height_by_hash(hash)?;
        self.block_status_by_height(height)
    }
    /// Builds the status for `height`, including the next block's hash when
    /// the queried block is not the tip.
    fn block_status_by_height(&self, height: Height) -> Result<BlockStatus> {
        let indexer = self.indexer();
        let hashes = &indexer.vecs.block.height_to_blockhash;
        let tip = Height::from(hashes.len().saturating_sub(1));
        if height > tip {
            // Beyond the indexed tip: not on the best chain as we know it.
            return Ok(BlockStatus::not_in_best_chain());
        }
        // Hash of the following block, when one exists; read errors propagate.
        let next_best = (height < tip)
            .then(|| hashes.read_once(height.incremented()))
            .transpose()?;
        Ok(BlockStatus::in_best_chain(height, next_best))
    }
}

View File

@@ -0,0 +1,81 @@
use brk_error::{Error, Result};
use brk_types::{BlockTimestamp, Date, DateIndex, Height, Timestamp};
use jiff::Timestamp as JiffTimestamp;
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
impl Query {
    /// Finds the last block whose timestamp is at or before `timestamp`.
    ///
    /// Strategy: map the target timestamp to a calendar date, jump to that
    /// date's first block via the computed index, then scan forward until a
    /// block timestamp exceeds the target; one backward step covers targets
    /// just before the day boundary.
    ///
    /// # Errors
    /// Fails when no blocks are indexed yet.
    // NOTE(review): block timestamps are not strictly monotonic on Bitcoin,
    // so the forward scan's early `break` can stop at an out-of-order
    // timestamp — confirm this matches the intended semantics.
    pub fn block_by_timestamp(&self, timestamp: Timestamp) -> Result<BlockTimestamp> {
        let indexer = self.indexer();
        let computer = self.computer();
        let max_height = self.height();
        let max_height_usize: usize = max_height.into();
        if max_height_usize == 0 {
            return Err(Error::Str("No blocks indexed"));
        }
        let target = timestamp;
        let date = Date::from(target);
        // Dates before the chain's genesis default to index 0.
        let dateindex = DateIndex::try_from(date).unwrap_or_default();
        // Get first height of the target date
        let first_height_of_day = computer
            .indexes
            .dateindex_to_first_height
            .read_once(dateindex)
            .unwrap_or(Height::from(0usize));
        let start: usize = usize::from(first_height_of_day).min(max_height_usize);
        // Use iterator for efficient sequential access
        let mut timestamp_iter = indexer.vecs.block.height_to_timestamp.iter()?;
        // Search forward from start to find the last block <= target timestamp
        let mut best_height = start;
        let mut best_ts = timestamp_iter.get_unwrap(Height::from(start));
        for h in (start + 1)..=max_height_usize {
            let height = Height::from(h);
            let block_ts = timestamp_iter.get_unwrap(height);
            if block_ts <= target {
                best_height = h;
                best_ts = block_ts;
            } else {
                break;
            }
        }
        // Check one block before start in case we need to go backward
        if start > 0 && best_ts > target {
            let prev_height = Height::from(start - 1);
            let prev_ts = timestamp_iter.get_unwrap(prev_height);
            if prev_ts <= target {
                best_height = start - 1;
                best_ts = prev_ts;
            }
        }
        let height = Height::from(best_height);
        let blockhash = indexer
            .vecs
            .block
            .height_to_blockhash
            .iter()?
            .get_unwrap(height);
        // Convert timestamp to ISO 8601 format
        let ts_secs: i64 = (*best_ts).into();
        // Fall back to the raw numeric timestamp if jiff rejects the value.
        let iso_timestamp = JiffTimestamp::from_second(ts_secs)
            .map(|t| t.to_string())
            .unwrap_or_else(|_| best_ts.to_string());
        Ok(BlockTimestamp {
            height,
            hash: blockhash,
            timestamp: iso_timestamp,
        })
    }
}

View File

@@ -0,0 +1,128 @@
use brk_error::{Error, Result};
use brk_types::{Height, Transaction, TxIndex, Txid};
use vecdb::{AnyVec, GenericStoredVec, TypedVecIterator};
use super::BLOCK_TXS_PAGE_SIZE;
use crate::Query;
impl Query {
pub fn block_txids(&self, hash: &str) -> Result<Vec<Txid>> {
let height = self.height_by_hash(hash)?;
self.block_txids_by_height(height)
}
pub fn block_txs(&self, hash: &str, start_index: usize) -> Result<Vec<Transaction>> {
let height = self.height_by_hash(hash)?;
self.block_txs_by_height(height, start_index)
}
pub fn block_txid_at_index(&self, hash: &str, index: usize) -> Result<Txid> {
let height = self.height_by_hash(hash)?;
self.block_txid_at_index_by_height(height, index)
}
// === Helper methods ===
fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let count = next - first;
let txids: Vec<Txid> = indexer
.vecs
.tx
.txindex_to_txid
.iter()?
.skip(first)
.take(count)
.collect();
Ok(txids)
}
fn block_txs_by_height(
&self,
height: Height,
start_index: usize,
) -> Result<Vec<Transaction>> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let tx_count = next - first;
if start_index >= tx_count {
return Ok(Vec::new());
}
let end_index = (start_index + BLOCK_TXS_PAGE_SIZE).min(tx_count);
let count = end_index - start_index;
let mut txs = Vec::with_capacity(count);
for i in start_index..end_index {
let txindex = TxIndex::from(first + i);
let tx = self.transaction_by_index(txindex)?;
txs.push(tx);
}
Ok(txs)
}
fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let tx_count = next - first;
if index >= tx_count {
return Err(Error::Str("Transaction index out of range"));
}
let txindex = TxIndex::from(first + index);
let txid = indexer.vecs.tx.txindex_to_txid.iter()?.get_unwrap(txindex);
Ok(txid)
}
}

View File

@@ -0,0 +1,38 @@
use brk_error::{Error, Result};
use brk_types::{MempoolBlock, MempoolInfo, RecommendedFees, Txid};
use crate::Query;
impl Query {
pub fn mempool_info(&self) -> Result<MempoolInfo> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
Ok(mempool.get_info())
}
pub fn mempool_txids(&self) -> Result<Vec<Txid>> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
let txs = mempool.get_txs();
Ok(txs.keys().cloned().collect())
}
pub fn recommended_fees(&self) -> Result<RecommendedFees> {
self.mempool()
.map(|mempool| mempool.get_fees())
.ok_or(Error::MempoolNotAvailable)
}
pub fn mempool_blocks(&self) -> Result<Vec<MempoolBlock>> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
let block_stats = mempool.get_block_stats();
let blocks = block_stats
.into_iter()
.map(|stats| {
MempoolBlock::new(stats.tx_count, stats.total_vsize, stats.total_fee, stats.fee_range)
})
.collect();
Ok(blocks)
}
}

View File

@@ -0,0 +1,43 @@
use brk_error::Result;
use brk_types::{BlockFeeRatesEntry, FeeRatePercentiles, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Per-day fee-rate percentile summaries covering `time_period`.
    ///
    /// Each entry carries the day's representative height and timestamp plus
    /// min/10/25/median/75/90/max fee-rate percentiles for that day.
    pub fn block_fee_rates(&self, time_period: TimePeriod) -> Result<Vec<BlockFeeRatesEntry>> {
        let computer = self.computer();
        let current_height = self.height();
        // Window start: `block_count` blocks back from the tip, clamped at 0.
        let start = current_height
            .to_usize()
            .saturating_sub(time_period.block_count());
        let iter = DateIndexIter::new(computer, start, current_height.to_usize());
        let vecs = &computer.chain.indexes_to_fee_rate.dateindex;
        // One sequential iterator per percentile column.
        let mut min = vecs.unwrap_min().iter();
        let mut pct10 = vecs.unwrap_pct10().iter();
        let mut pct25 = vecs.unwrap_pct25().iter();
        let mut median = vecs.unwrap_median().iter();
        let mut pct75 = vecs.unwrap_pct75().iter();
        let mut pct90 = vecs.unwrap_pct90().iter();
        let mut max = vecs.unwrap_max().iter();
        Ok(iter.collect(|di, ts, h| {
            Some(BlockFeeRatesEntry {
                avg_height: h,
                timestamp: ts,
                // Days with missing data yield zeroed percentiles rather than
                // being dropped from the series.
                percentiles: FeeRatePercentiles::new(
                    min.get(di).unwrap_or_default(),
                    pct10.get(di).unwrap_or_default(),
                    pct25.get(di).unwrap_or_default(),
                    median.get(di).unwrap_or_default(),
                    pct75.get(di).unwrap_or_default(),
                    pct90.get(di).unwrap_or_default(),
                    max.get(di).unwrap_or_default(),
                ),
            })
        }))
    }
}

View File

@@ -0,0 +1,34 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average total fees per block, bucketed by day, over `time_period`.
    pub fn block_fees(&self, time_period: TimePeriod) -> Result<Vec<BlockFeesEntry>> {
        let computer = self.computer();
        let tip = self.height();
        // Window start: `block_count` blocks back from the tip, clamped at 0.
        let first_height = tip.to_usize().saturating_sub(time_period.block_count());
        let day_iter = DateIndexIter::new(computer, first_height, tip.to_usize());
        let mut avg_fee_iter = computer
            .chain
            .indexes_to_fee
            .sats
            .dateindex
            .unwrap_average()
            .iter();
        // Days without fee data are skipped via the `None` arm of `map`.
        Ok(day_iter.collect(|di, ts, h| {
            avg_fee_iter.get(di).map(|fee| BlockFeesEntry {
                avg_height: h,
                timestamp: ts,
                avg_fees: fee,
            })
        }))
    }
}

View File

@@ -0,0 +1,35 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average block reward per day over `time_period`.
    ///
    /// The coinbase column already sums subsidy and collected fees.
    pub fn block_rewards(&self, time_period: TimePeriod) -> Result<Vec<BlockRewardsEntry>> {
        let computer = self.computer();
        let tip = self.height();
        // Window start: `block_count` blocks back from the tip, clamped at 0.
        let first_height = tip.to_usize().saturating_sub(time_period.block_count());
        let day_iter = DateIndexIter::new(computer, first_height, tip.to_usize());
        let mut avg_reward_iter = computer
            .chain
            .indexes_to_coinbase
            .sats
            .dateindex
            .unwrap_average()
            .iter();
        // Days without reward data are skipped via the `None` arm of `map`.
        Ok(day_iter.collect(|di, ts, h| {
            avg_reward_iter.get(di).map(|reward| BlockRewardsEntry {
                avg_height: h.into(),
                timestamp: *ts,
                avg_rewards: *reward,
            })
        }))
    }
}

View File

@@ -0,0 +1,61 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average block size and weight per day over `time_period`.
    ///
    /// The two series are returned separately because either metric may be
    /// missing for a given day.
    pub fn block_sizes_weights(&self, time_period: TimePeriod) -> Result<BlockSizesWeights> {
        let computer = self.computer();
        let current_height = self.height();
        // Window start: `block_count` blocks back from the tip, clamped at 0.
        let start = current_height
            .to_usize()
            .saturating_sub(time_period.block_count());
        let iter = DateIndexIter::new(computer, start, current_height.to_usize());
        let mut sizes_vec = computer
            .chain
            .indexes_to_block_size
            .dateindex
            .unwrap_average()
            .iter();
        let mut weights_vec = computer
            .chain
            .indexes_to_block_weight
            .dateindex
            .unwrap_average()
            .iter();
        let entries: Vec<_> = iter.collect(|di, ts, h| {
            Some((
                h.into(),
                *ts,
                sizes_vec.get(di).map(|s| *s),
                weights_vec.get(di).map(|w| *w),
            ))
        });
        // Build both series in a single pass instead of running two separate
        // filter_map traversals over the intermediate vec.
        let mut sizes = Vec::with_capacity(entries.len());
        let mut weights = Vec::with_capacity(entries.len());
        for (h, ts, size, weight) in entries {
            if let Some(s) = size {
                sizes.push(BlockSizeEntry {
                    avg_height: h,
                    timestamp: ts,
                    avg_size: s,
                });
            }
            if let Some(w) = weight {
                weights.push(BlockWeightEntry {
                    avg_height: h,
                    timestamp: ts,
                    avg_weight: w,
                });
            }
        }
        Ok(BlockSizesWeights { sizes, weights })
    }
}

View File

@@ -39,7 +39,11 @@ impl<'a> DateIndexIter<'a> {
where
F: FnMut(DateIndex, Timestamp, Height) -> Option<T>,
{
let total = self.end_di.to_usize().saturating_sub(self.start_di.to_usize()) + 1;
let total = self
.end_di
.to_usize()
.saturating_sub(self.start_di.to_usize())
+ 1;
let mut timestamps = self
.computer
.chain
@@ -54,10 +58,10 @@ impl<'a> DateIndexIter<'a> {
while i <= self.end_di.to_usize() {
let di = DateIndex::from(i);
if let (Some(ts), Some(h)) = (timestamps.get(di), heights.get(di)) {
if let Some(entry) = transform(di, ts, h) {
entries.push(entry);
}
if let (Some(ts), Some(h)) = (timestamps.get(di), heights.get(di))
&& let Some(entry) = transform(di, ts, h)
{
entries.push(entry);
}
i += self.step;
}

View File

@@ -0,0 +1,121 @@
use std::time::{SystemTime, UNIX_EPOCH};
use brk_error::Result;
use brk_types::{DifficultyAdjustment, DifficultyEpoch, Height};
use vecdb::GenericStoredVec;
use crate::Query;
/// Blocks per difficulty epoch (2 weeks target)
const BLOCKS_PER_EPOCH: u32 = 2016;
/// Target block time in seconds (10 minutes)
const TARGET_BLOCK_TIME: u64 = 600;
impl Query {
    /// Live difficulty-adjustment summary for the current epoch: progress
    /// through the 2016-block window, the projected retarget change, the
    /// previous retarget's change, and timing estimates.
    ///
    /// # Errors
    /// Propagates read failures from the epoch/height/timestamp columns.
    pub fn difficulty_adjustment(&self) -> Result<DifficultyAdjustment> {
        let indexer = self.indexer();
        let computer = self.computer();
        let current_height = self.height();
        let current_height_u32: u32 = current_height.into();
        // Get current epoch
        let current_epoch = computer
            .indexes
            .height_to_difficultyepoch
            .read_once(current_height)?;
        let current_epoch_usize: usize = current_epoch.into();
        // Get epoch start height
        let epoch_start_height = computer
            .indexes
            .difficultyepoch_to_first_height
            .read_once(current_epoch)?;
        let epoch_start_u32: u32 = epoch_start_height.into();
        // Calculate epoch progress
        let next_retarget_height = epoch_start_u32 + BLOCKS_PER_EPOCH;
        let blocks_into_epoch = current_height_u32 - epoch_start_u32;
        let remaining_blocks = next_retarget_height - current_height_u32;
        let progress_percent = (blocks_into_epoch as f64 / BLOCKS_PER_EPOCH as f64) * 100.0;
        // Get timestamps using difficultyepoch_to_timestamp for epoch start
        let epoch_start_timestamp = computer
            .chain
            .difficultyepoch_to_timestamp
            .read_once(current_epoch)?;
        let current_timestamp = indexer
            .vecs
            .block
            .height_to_timestamp
            .read_once(current_height)?;
        // Calculate average block time in current epoch
        let elapsed_time = (*current_timestamp - *epoch_start_timestamp) as u64;
        // Guard against division by zero at the very first block of an epoch.
        let time_avg = if blocks_into_epoch > 0 {
            elapsed_time / blocks_into_epoch as u64
        } else {
            TARGET_BLOCK_TIME
        };
        // Estimate remaining time and retarget date
        let remaining_time = remaining_blocks as u64 * time_avg;
        // Fall back to the tip's timestamp if the system clock is unavailable.
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(*current_timestamp as u64);
        let estimated_retarget_date = now + remaining_time;
        // Calculate expected vs actual time for difficulty change estimate
        let expected_time = blocks_into_epoch as u64 * TARGET_BLOCK_TIME;
        let difficulty_change = if elapsed_time > 0 && blocks_into_epoch > 0 {
            ((expected_time as f64 / elapsed_time as f64) - 1.0) * 100.0
        } else {
            0.0
        };
        // Time offset from expected schedule
        let time_offset = expected_time as i64 - elapsed_time as i64;
        // Calculate previous retarget using stored difficulty values
        let previous_retarget = if current_epoch_usize > 0 {
            let prev_epoch = DifficultyEpoch::from(current_epoch_usize - 1);
            let prev_epoch_start = computer
                .indexes
                .difficultyepoch_to_first_height
                .read_once(prev_epoch)?;
            let prev_difficulty = indexer
                .vecs
                .block
                .height_to_difficulty
                .read_once(prev_epoch_start)?;
            let curr_difficulty = indexer
                .vecs
                .block
                .height_to_difficulty
                .read_once(epoch_start_height)?;
            if *prev_difficulty > 0.0 {
                ((*curr_difficulty / *prev_difficulty) - 1.0) * 100.0
            } else {
                0.0
            }
        } else {
            // Genesis epoch: there is no previous retarget to compare against.
            0.0
        };
        Ok(DifficultyAdjustment {
            progress_percent,
            difficulty_change,
            estimated_retarget_date,
            remaining_blocks,
            remaining_time,
            previous_retarget,
            next_retarget_height: Height::from(next_retarget_height),
            time_avg,
            // NOTE(review): adjusted_time_avg currently mirrors time_avg —
            // confirm whether a separately adjusted estimate is intended.
            adjusted_time_avg: time_avg,
            time_offset,
        })
    }
}

View File

@@ -0,0 +1,26 @@
use brk_error::Result;
use brk_types::{DifficultyAdjustmentEntry, TimePeriod};
use vecdb::VecIndex;
use super::epochs::iter_difficulty_epochs;
use crate::Query;
impl Query {
    /// Historical difficulty retargets, newest first.
    ///
    /// With no `time_period`, the whole chain history is covered.
    pub fn difficulty_adjustments(
        &self,
        time_period: Option<TimePeriod>,
    ) -> Result<Vec<DifficultyAdjustmentEntry>> {
        let end = self.height().to_usize();
        let start = time_period.map_or(0, |tp| end.saturating_sub(tp.block_count()));
        let mut entries = iter_difficulty_epochs(self.computer(), start, end);
        // Return in reverse chronological order (newest first).
        entries.reverse();
        Ok(entries)
    }
}

View File

@@ -0,0 +1,100 @@
use brk_error::Result;
use brk_types::{DateIndex, DifficultyEntry, HashrateEntry, HashrateSummary, Height, TimePeriod};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
use super::epochs::iter_difficulty_epochs;
use crate::Query;
impl Query {
    /// Hashrate summary over `time_period` (whole history when `None`):
    /// sampled daily hashrate points, the difficulty steps in the window, and
    /// the current hashrate/difficulty.
    pub fn hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
        let indexer = self.indexer();
        let computer = self.computer();
        let current_height = self.height();
        // Get current difficulty
        let current_difficulty = *indexer
            .vecs
            .block
            .height_to_difficulty
            .read_once(current_height)?;
        // Get current hashrate
        let current_dateindex = computer
            .indexes
            .height_to_dateindex
            .read_once(current_height)?;
        let current_hashrate = *computer
            .chain
            .indexes_to_hash_rate
            .dateindex
            .unwrap_last()
            .read_once(current_dateindex)? as u128;
        // Calculate start height based on time period
        let end = current_height.to_usize();
        let start = match time_period {
            Some(tp) => end.saturating_sub(tp.block_count()),
            None => 0,
        };
        // Get hashrate entries using iterators for efficiency
        let start_dateindex = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(start))?;
        let end_dateindex = current_dateindex;
        // Sample at regular intervals to avoid too many data points
        let total_days = end_dateindex
            .to_usize()
            .saturating_sub(start_dateindex.to_usize())
            + 1;
        let step = (total_days / 200).max(1); // Max ~200 data points
        // Create iterators for the loop
        let mut hashrate_iter = computer
            .chain
            .indexes_to_hash_rate
            .dateindex
            .unwrap_last()
            .iter();
        let mut timestamp_iter = computer
            .chain
            .timeindexes_to_timestamp
            .dateindex_extra
            .unwrap_first()
            .iter();
        let mut hashrates = Vec::with_capacity(total_days / step + 1);
        let mut di = start_dateindex.to_usize();
        while di <= end_dateindex.to_usize() {
            let dateindex = DateIndex::from(di);
            // Days missing either column are skipped rather than failing.
            if let (Some(hr), Some(timestamp)) =
                (hashrate_iter.get(dateindex), timestamp_iter.get(dateindex))
            {
                hashrates.push(HashrateEntry {
                    timestamp,
                    avg_hashrate: (*hr) as u128,
                });
            }
            di += step;
        }
        // Get difficulty adjustments within the period
        let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
            .into_iter()
            .map(|e| DifficultyEntry {
                timestamp: e.timestamp,
                difficulty: e.difficulty,
                height: e.height,
            })
            .collect();
        Ok(HashrateSummary {
            hashrates,
            difficulty,
            current_hashrate,
            current_difficulty,
        })
    }
}

View File

@@ -0,0 +1,11 @@
mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes;
mod dateindex_iter;
mod difficulty;
mod difficulty_adjustments;
mod epochs;
mod hashrate;
mod pools;
mod reward_stats;

View File

@@ -0,0 +1,171 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, IterableVec, VecIndex};
use crate::Query;
impl Query {
    /// Mining-pool market shares over `time_period`.
    ///
    /// Uses each pool's cumulative blocks-mined column: the count for the
    /// window is `cumulative(end) - cumulative(start - 1)`. Pools with zero
    /// blocks in the window are omitted; results are ranked by block count.
    pub fn mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
        let computer = self.computer();
        let current_height = self.height();
        let end = current_height.to_usize();
        // No blocks indexed yet
        if computer.pools.height_to_pool.len() == 0 {
            return Ok(PoolsSummary {
                pools: vec![],
                block_count: 0,
                last_estimated_hashrate: 0,
            });
        }
        // Calculate start height based on time period
        let start = end.saturating_sub(time_period.block_count());
        let pools = pools();
        let mut pool_data: Vec<(&'static brk_types::Pool, u32)> = Vec::new();
        // For each pool, get cumulative count at end and start, subtract to get range count
        for (pool_id, pool_vecs) in &computer.pools.vecs {
            let mut cumulative = pool_vecs
                .indexes_to_blocks_mined
                .height_extra
                .unwrap_cumulative()
                .iter();
            // Missing entries are treated as zero via unwrap_or_default.
            let count_at_end: u32 = *cumulative.get(current_height).unwrap_or_default();
            let count_at_start: u32 = if start == 0 {
                0
            } else {
                *cumulative.get(Height::from(start - 1)).unwrap_or_default()
            };
            let block_count = count_at_end.saturating_sub(count_at_start);
            // Only include pools that mined at least one block in the period
            if block_count > 0 {
                pool_data.push((pools.get(*pool_id), block_count));
            }
        }
        // Sort by block count descending
        pool_data.sort_by(|a, b| b.1.cmp(&a.1));
        let total_blocks: u32 = pool_data.iter().map(|(_, count)| count).sum();
        // Build stats with ranks
        let pool_stats: Vec<PoolStats> = pool_data
            .into_iter()
            .enumerate()
            .map(|(idx, (pool, block_count))| {
                let share = if total_blocks > 0 {
                    block_count as f64 / total_blocks as f64
                } else {
                    0.0
                };
                PoolStats::new(pool, block_count, (idx + 1) as u32, share)
            })
            .collect();
        // TODO: Calculate actual hashrate from difficulty
        let last_estimated_hashrate = 0u128;
        Ok(PoolsSummary {
            pools: pool_stats,
            block_count: total_blocks,
            last_estimated_hashrate,
        })
    }
    /// Static metadata for every known pool.
    pub fn all_pools(&self) -> Vec<PoolInfo> {
        pools().iter().map(PoolInfo::from).collect()
    }
    /// Detailed stats for one pool: all-time / 24h (144-block) / one-week
    /// (1008-block) block counts and the pool's share of network blocks in
    /// each window.
    ///
    /// # Errors
    /// `"Pool data not found"` when no vecs exist for `slug`.
    pub fn pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
        let computer = self.computer();
        let current_height = self.height();
        let end = current_height.to_usize();
        let pools_list = pools();
        let pool = pools_list.get(slug);
        // Get pool vecs for this specific pool
        let pool_vecs = computer
            .pools
            .vecs
            .get(&slug)
            .ok_or_else(|| Error::Str("Pool data not found"))?;
        let mut cumulative = pool_vecs
            .indexes_to_blocks_mined
            .height_extra
            .unwrap_cumulative()
            .iter();
        // Get total blocks (all time)
        let total_all: u32 = *cumulative.get(current_height).unwrap_or_default();
        // Get blocks for 24h (144 blocks)
        let start_24h = end.saturating_sub(144);
        let count_before_24h: u32 = if start_24h == 0 {
            0
        } else {
            *cumulative
                .get(Height::from(start_24h - 1))
                .unwrap_or_default()
        };
        let total_24h = total_all.saturating_sub(count_before_24h);
        // Get blocks for 1w (1008 blocks)
        let start_1w = end.saturating_sub(1008);
        let count_before_1w: u32 = if start_1w == 0 {
            0
        } else {
            *cumulative
                .get(Height::from(start_1w - 1))
                .unwrap_or_default()
        };
        let total_1w = total_all.saturating_sub(count_before_1w);
        // Calculate total network blocks for share calculation
        let network_blocks_all = (end + 1) as u32;
        let network_blocks_24h = (end - start_24h + 1) as u32;
        let network_blocks_1w = (end - start_1w + 1) as u32;
        let share_all = if network_blocks_all > 0 {
            total_all as f64 / network_blocks_all as f64
        } else {
            0.0
        };
        let share_24h = if network_blocks_24h > 0 {
            total_24h as f64 / network_blocks_24h as f64
        } else {
            0.0
        };
        let share_1w = if network_blocks_1w > 0 {
            total_1w as f64 / network_blocks_1w as f64
        } else {
            0.0
        };
        Ok(PoolDetail {
            pool: PoolDetailInfo::from(pool),
            block_count: PoolBlockCounts {
                all: total_all,
                day: total_24h,
                week: total_1w,
            },
            block_share: PoolBlockShares {
                all: share_all,
                day: share_24h,
                week: share_1w,
            },
            estimated_hashrate: 0, // TODO: Calculate from share and network hashrate
            reported_hashrate: None,
        })
    }
}

View File

@@ -0,0 +1,66 @@
use brk_error::Result;
use brk_types::{Height, RewardStats, Sats};
use vecdb::{IterableVec, VecIndex};
use crate::Query;
impl Query {
    /// Aggregate coinbase reward, total fees and transaction count over the
    /// most recent `block_count` blocks, tip inclusive.
    ///
    /// # Errors
    /// Propagates iterator-construction failures from the underlying columns.
    pub fn reward_stats(&self, block_count: usize) -> Result<RewardStats> {
        let computer = self.computer();
        let current_height = self.height();
        let end_block = current_height;
        // Bug fix: the previous `block_count - 1` underflowed when
        // `block_count == 0` (panic in debug; wrap-around in release that made
        // the loop scan the entire chain). Saturate the inner subtraction so a
        // zero count degrades to a single-block window instead.
        let start_block = Height::from(
            current_height
                .to_usize()
                .saturating_sub(block_count.saturating_sub(1)),
        );
        let mut coinbase_iter = computer
            .chain
            .indexes_to_coinbase
            .sats
            .height
            .as_ref()
            .unwrap()
            .iter();
        let mut fee_iter = computer
            .chain
            .indexes_to_fee
            .sats
            .height
            .unwrap_sum()
            .iter();
        let mut tx_count_iter = computer
            .chain
            .indexes_to_tx_count
            .height
            .as_ref()
            .unwrap()
            .iter();
        let mut total_reward = Sats::ZERO;
        let mut total_fee = Sats::ZERO;
        let mut total_tx: u64 = 0;
        // Missing per-height entries simply contribute nothing to the totals.
        for height in start_block.to_usize()..=end_block.to_usize() {
            let h = Height::from(height);
            if let Some(coinbase) = coinbase_iter.get(h) {
                total_reward += coinbase;
            }
            if let Some(fee) = fee_iter.get(h) {
                total_fee += fee;
            }
            if let Some(tx_count) = tx_count_iter.get(h) {
                total_tx += *tx_count;
            }
        }
        Ok(RewardStats {
            start_block,
            end_block,
            total_reward,
            total_fee,
            total_tx,
        })
    }
}

View File

@@ -0,0 +1,11 @@
//! Query implementation modules.
//!
//! Each module extends `Query` with domain-specific methods using `impl Query` blocks.
mod address;
mod block;
mod mempool;
mod mining;
mod transaction;
pub use block::BLOCK_TXS_PAGE_SIZE;

View File

@@ -0,0 +1,405 @@
use std::{io::Cursor, str::FromStr};
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{
Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxOutspend, TxStatus, Txid, TxidPath,
TxidPrefix, Vin, Vout, Weight,
};
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
impl Query {
pub fn transaction(&self, TxidPath { txid }: TxidPath) -> Result<Transaction> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.tx().clone());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_by_index(txindex)
}
/// Confirmation status for a transaction: `UNCONFIRMED` when it is in the
/// mempool, otherwise the confirming block's height, hash and time.
///
/// # Errors
/// `Error::InvalidTxid` for an unparseable id; `Error::UnknownTxid` when the
/// transaction is in neither the mempool nor the index.
pub fn transaction_status(&self, TxidPath { txid }: TxidPath) -> Result<TxStatus> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // First check mempool for unconfirmed transactions
    if let Some(mempool) = self.mempool()
        && mempool.get_txs().contains_key(&txid)
    {
        return Ok(TxStatus::UNCONFIRMED);
    }
    // Look up confirmed transaction by txid prefix
    let prefix = TxidPrefix::from(&txid);
    let indexer = self.indexer();
    // Store errors and missing entries both collapse into UnknownTxid.
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    // Get block info for status
    let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
    let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    Ok(TxStatus {
        confirmed: true,
        block_height: Some(height),
        block_hash: Some(block_hash),
        block_time: Some(block_time),
    })
}
pub fn transaction_hex(&self, TxidPath { txid }: TxidPath) -> Result<String> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.hex().to_string());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_hex_by_index(txindex)
}
/// Spend status of output `vout` of the given transaction.
///
/// Mempool transactions' outputs are reported unspent (not yet on-chain).
/// For confirmed transactions the output's global txoutindex is looked up in
/// the stateful spent-map, where the sentinel `TxInIndex::UNSPENT` marks an
/// unspent output.
///
/// # Errors
/// `Error::InvalidTxid` / `Error::UnknownTxid` for bad or unknown ids.
// NOTE(review): `vout` is not validated against the tx's output count here —
// an out-of-range vout would address a neighboring tx's output; confirm this
// is validated upstream.
pub fn outspend(&self, TxidPath { txid }: TxidPath, vout: Vout) -> Result<TxOutspend> {
    let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
        return Err(Error::InvalidTxid);
    };
    let txid = Txid::from(txid);
    // Mempool outputs are unspent in on-chain terms
    if let Some(mempool) = self.mempool()
        && mempool.get_txs().contains_key(&txid)
    {
        return Ok(TxOutspend::UNSPENT);
    }
    // Look up confirmed transaction
    let prefix = TxidPrefix::from(&txid);
    let indexer = self.indexer();
    // Store errors and missing entries both collapse into UnknownTxid.
    let Ok(Some(txindex)) = indexer
        .stores
        .txidprefix_to_txindex
        .get(&prefix)
        .map(|opt| opt.map(|cow| cow.into_owned()))
    else {
        return Err(Error::UnknownTxid);
    };
    // Calculate txoutindex
    let first_txoutindex = indexer
        .vecs
        .tx
        .txindex_to_first_txoutindex
        .read_once(txindex)?;
    let txoutindex = first_txoutindex + vout;
    // Look up spend status
    let computer = self.computer();
    let txinindex = computer
        .stateful
        .txoutindex_to_txinindex
        .read_once(txoutindex)?;
    if txinindex == TxInIndex::UNSPENT {
        return Ok(TxOutspend::UNSPENT);
    }
    self.outspend_details(txinindex)
}
pub fn outspends(&self, TxidPath { txid }: TxidPath) -> Result<Vec<TxOutspend>> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// Mempool outputs are unspent in on-chain terms
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
let output_count = tx_with_hex.tx().output.len();
return Ok(vec![TxOutspend::UNSPENT; output_count]);
}
// Look up confirmed transaction
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Get output range
let first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex)?;
let next_first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex.incremented())?;
let output_count = usize::from(next_first_txoutindex) - usize::from(first_txoutindex);
// Get spend status for each output
let computer = self.computer();
let mut txoutindex_to_txinindex_iter = computer.stateful.txoutindex_to_txinindex.iter()?;
let mut outspends = Vec::with_capacity(output_count);
for i in 0..output_count {
let txoutindex = first_txoutindex + Vout::from(i);
let txinindex = txoutindex_to_txinindex_iter.get_unwrap(txoutindex);
if txinindex == TxInIndex::UNSPENT {
outspends.push(TxOutspend::UNSPENT);
} else {
outspends.push(self.outspend_details(txinindex)?);
}
}
Ok(outspends)
}
// === Helper methods ===
/// Build a full [`Transaction`] view for an already-resolved `txindex`.
///
/// Reads tx metadata from the index vecs, decodes the raw transaction
/// bytes from the blk files, resolves each input's prevout (txid, vout,
/// value), and assembles the confirmed status from the containing block.
/// The fee is derived afterwards via `compute_fee()`.
///
/// # Errors
/// Propagates read errors from the index/computer vecs and the blk
/// reader, plus a decode error if the raw bytes fail consensus decoding.
pub fn transaction_by_index(&self, txindex: TxIndex) -> Result<Transaction> {
    let indexer = self.indexer();
    let reader = self.reader();
    let computer = self.computer();
    // Get tx metadata using read_once for single lookups
    let txid = indexer.vecs.tx.txindex_to_txid.read_once(txindex)?;
    let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
    let version = indexer.vecs.tx.txindex_to_txversion.read_once(txindex)?;
    let lock_time = indexer.vecs.tx.txindex_to_rawlocktime.read_once(txindex)?;
    let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
    let first_txinindex = indexer
        .vecs
        .tx
        .txindex_to_first_txinindex
        .read_once(txindex)?;
    // Byte position of the raw tx within the blk files.
    let position = computer.blks.txindex_to_position.read_once(txindex)?;
    // Get block info for status
    let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
    let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
    // Read and decode the raw transaction from blk file
    let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
    let mut cursor = Cursor::new(buffer);
    let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
        .map_err(|_| Error::Str("Failed to decode transaction"))?;
    // For iterating through inputs, we need iterators (multiple lookups)
    let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
    let mut txindex_to_first_txoutindex_iter =
        indexer.vecs.tx.txindex_to_first_txoutindex.iter()?;
    let mut txinindex_to_outpoint_iter = indexer.vecs.txin.txinindex_to_outpoint.iter()?;
    let mut txoutindex_to_value_iter = indexer.vecs.txout.txoutindex_to_value.iter()?;
    // Build inputs with prevout information
    let input: Vec<TxIn> = tx
        .input
        .iter()
        .enumerate()
        .map(|(i, txin)| {
            // The tx's inputs occupy a contiguous txinindex range
            // starting at first_txinindex.
            let txinindex = first_txinindex + i;
            let outpoint = txinindex_to_outpoint_iter.get_unwrap(txinindex);
            let is_coinbase = outpoint.is_coinbase();
            // Get prevout info if not coinbase
            let (prev_txid, prev_vout, prevout) = if is_coinbase {
                // Coinbase inputs use sentinel txid/vout and no prevout.
                (Txid::COINBASE, Vout::MAX, None)
            } else {
                let prev_txindex = outpoint.txindex();
                let prev_vout = outpoint.vout();
                let prev_txid = txindex_to_txid_iter.get_unwrap(prev_txindex);
                // Calculate the txoutindex for the prevout
                let prev_first_txoutindex =
                    txindex_to_first_txoutindex_iter.get_unwrap(prev_txindex);
                let prev_txoutindex = prev_first_txoutindex + prev_vout;
                // Get the value of the prevout
                let prev_value = txoutindex_to_value_iter.get_unwrap(prev_txoutindex);
                // NOTE(review): prevout script is an empty placeholder —
                // only the value is populated; reconstructing the real
                // script would require another lookup. Confirm callers
                // tolerate an empty script_pubkey here.
                let prevout = Some(TxOut::from((
                    bitcoin::ScriptBuf::new(), // Placeholder - would need to reconstruct
                    prev_value,
                )));
                (prev_txid, prev_vout, prevout)
            };
            TxIn {
                txid: prev_txid,
                vout: prev_vout,
                prevout,
                script_sig: txin.script_sig.clone(),
                // asm renderings are unit placeholders in this type
                script_sig_asm: (),
                is_coinbase,
                sequence: txin.sequence.0,
                inner_redeem_script_asm: (),
            }
        })
        .collect();
    // Calculate weight before consuming tx.output
    let weight = Weight::from(tx.weight());
    // Calculate sigop cost; the closure returning None means no prevout
    // script is available, so script-based sigops aren't counted.
    let total_sigop_cost = tx.total_sigop_cost(|_| None);
    // Build outputs
    let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
    // Build status
    let status = TxStatus {
        confirmed: true,
        block_height: Some(height),
        block_hash: Some(block_hash),
        block_time: Some(block_time),
    };
    let mut transaction = Transaction {
        index: Some(txindex),
        txid,
        version,
        lock_time,
        total_size: *total_size as usize,
        weight,
        total_sigop_cost,
        fee: Sats::ZERO, // Will be computed below
        input,
        output,
        status,
    };
    // Compute fee from inputs - outputs
    transaction.compute_fee();
    Ok(transaction)
}
/// Hex-encode the raw on-disk bytes of the transaction at `txindex`.
///
/// Locates the tx's byte position and size, reads the raw bytes from
/// the blk files, and returns them lowercase-hex encoded.
fn transaction_hex_by_index(&self, txindex: TxIndex) -> Result<String> {
    let size = *self
        .indexer()
        .vecs
        .tx
        .txindex_to_total_size
        .read_once(txindex)? as usize;
    let position = self.computer().blks.txindex_to_position.read_once(txindex)?;
    let raw = self.reader().read_raw_bytes(position, size)?;
    Ok(raw.to_lower_hex_string())
}
/// Describe the spend of an output, given the input (`txinindex`) that
/// consumed it: spending txid, input position (vin), and the spender's
/// confirmation status.
fn outspend_details(&self, txinindex: TxInIndex) -> Result<TxOutspend> {
    let vecs = &self.indexer().vecs;
    // The consuming input belongs to exactly one spending transaction.
    let spending_txindex = vecs.txin.txinindex_to_txindex.read_once(txinindex)?;
    // vin = offset of this input within the spending tx's input range.
    let spending_first_txinindex = vecs
        .tx
        .txindex_to_first_txinindex
        .read_once(spending_txindex)?;
    let vin = Vin::from(usize::from(txinindex) - usize::from(spending_first_txinindex));
    // Spending tx identity and its confirmation metadata.
    let spending_txid = vecs.tx.txindex_to_txid.read_once(spending_txindex)?;
    let spending_height = vecs.tx.txindex_to_height.read_once(spending_txindex)?;
    let status = TxStatus {
        confirmed: true,
        block_height: Some(spending_height),
        block_hash: Some(vecs.block.height_to_blockhash.read_once(spending_height)?),
        block_time: Some(vecs.block.height_to_timestamp.read_once(spending_height)?),
    };
    Ok(TxOutspend {
        spent: true,
        txid: Some(spending_txid),
        vin: Some(vin),
        status: Some(status),
    })
}
}

View File

@@ -9,44 +9,30 @@ use brk_indexer::Indexer;
use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_traversable::TreeNode;
use brk_types::{
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, Format, HashrateSummary,
Height, Index, IndexInfo, Limit, MempoolInfo, Metric, MetricCount, PoolDetail, PoolInfo,
PoolSlug, PoolsSummary, RecommendedFees, TimePeriod, Timestamp, Transaction, TxOutspend,
TxStatus, Txid, TxidPath, Utxo, Vout,
};
use brk_types::{Format, Height, Index, IndexInfo, Limit, Metric, MetricCount};
use vecdb::{AnyExportableVec, AnyStoredVec};
// Infrastructure modules
#[cfg(feature = "tokio")]
mod r#async;
mod chain;
mod deser;
mod output;
mod pagination;
mod params;
mod vecs;
// Query impl blocks (extend Query with domain methods)
mod r#impl;
// Re-exports
#[cfg(feature = "tokio")]
pub use r#async::*;
pub use output::{Output, Value};
pub use pagination::{PaginatedIndexParam, PaginatedMetrics, PaginationParam};
pub use params::{Params, ParamsDeprec, ParamsOpt};
use vecs::Vecs;
pub use crate::chain::BLOCK_TXS_PAGE_SIZE;
pub use crate::chain::validate_address;
use crate::{
chain::{
get_address, get_address_mempool_txids, get_address_txids, get_address_utxos,
get_all_pools, get_block_by_height, get_block_by_timestamp, get_block_raw,
get_block_status_by_height, get_block_txid_at_index, get_block_txids, get_block_txs,
get_blocks, get_difficulty_adjustment, get_hashrate, get_height_by_hash,
get_mempool_blocks, get_mempool_info, get_mempool_txids, get_mining_pools, get_pool_detail,
get_recommended_fees, get_transaction, get_transaction_hex, get_transaction_status,
get_tx_outspend, get_tx_outspends,
},
vecs::{IndexToVec, MetricToVec},
pub use brk_types::{
DataRange, DataRangeFormat, MetricSelection, MetricSelectionLegacy, PaginatedMetrics,
Pagination, PaginationIndex,
};
pub use r#impl::BLOCK_TXS_PAGE_SIZE;
pub use output::{Output, Value};
use crate::vecs::{IndexToVec, MetricToVec};
use vecs::Vecs;
#[derive(Clone)]
pub struct Query(Arc<QueryInner<'static>>);
@@ -79,217 +65,17 @@ impl Query {
}))
}
pub fn get_height(&self) -> Height {
/// Current indexed height
pub fn height(&self) -> Height {
Height::from(self.indexer().vecs.block.height_to_blockhash.stamp())
}
pub fn get_address(&self, address: Address) -> Result<AddressStats> {
get_address(address, self)
}
pub fn get_address_txids(
&self,
address: Address,
after_txid: Option<Txid>,
limit: usize,
) -> Result<Vec<Txid>> {
get_address_txids(address, after_txid, limit, self)
}
pub fn get_address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
get_address_utxos(address, self)
}
pub fn get_address_mempool_txids(&self, address: Address) -> Result<Vec<Txid>> {
get_address_mempool_txids(address, self)
}
pub fn get_transaction(&self, txid: TxidPath) -> Result<Transaction> {
get_transaction(txid, self)
}
pub fn get_transaction_status(&self, txid: TxidPath) -> Result<TxStatus> {
get_transaction_status(txid, self)
}
pub fn get_transaction_hex(&self, txid: TxidPath) -> Result<String> {
get_transaction_hex(txid, self)
}
pub fn get_tx_outspend(&self, txid: TxidPath, vout: Vout) -> Result<TxOutspend> {
get_tx_outspend(txid, vout, self)
}
pub fn get_tx_outspends(&self, txid: TxidPath) -> Result<Vec<TxOutspend>> {
get_tx_outspends(txid, self)
}
pub fn get_block(&self, hash: &str) -> Result<BlockInfo> {
let height = get_height_by_hash(hash, self)?;
get_block_by_height(height, self)
}
pub fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> {
get_block_by_height(height, self)
}
pub fn get_block_by_timestamp(&self, timestamp: Timestamp) -> Result<BlockTimestamp> {
get_block_by_timestamp(timestamp, self)
}
pub fn get_block_status(&self, hash: &str) -> Result<BlockStatus> {
let height = get_height_by_hash(hash, self)?;
get_block_status_by_height(height, self)
}
pub fn get_blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
get_blocks(start_height, self)
}
pub fn get_block_txids(&self, hash: &str) -> Result<Vec<Txid>> {
let height = get_height_by_hash(hash, self)?;
get_block_txids(height, self)
}
pub fn get_block_txs(&self, hash: &str, start_index: usize) -> Result<Vec<Transaction>> {
let height = get_height_by_hash(hash, self)?;
get_block_txs(height, start_index, self)
}
pub fn get_block_txid_at_index(&self, hash: &str, index: usize) -> Result<Txid> {
let height = get_height_by_hash(hash, self)?;
get_block_txid_at_index(height, index, self)
}
pub fn get_block_raw(&self, hash: &str) -> Result<Vec<u8>> {
let height = get_height_by_hash(hash, self)?;
get_block_raw(height, self)
}
pub fn get_mempool_info(&self) -> Result<MempoolInfo> {
get_mempool_info(self)
}
pub fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
get_mempool_txids(self)
}
pub fn get_recommended_fees(&self) -> Result<RecommendedFees> {
get_recommended_fees(self)
}
pub fn get_mempool_blocks(&self) -> Result<Vec<brk_types::MempoolBlock>> {
get_mempool_blocks(self)
}
pub fn get_difficulty_adjustment(&self) -> Result<brk_types::DifficultyAdjustment> {
get_difficulty_adjustment(self)
}
pub fn get_mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
get_mining_pools(time_period, self)
}
pub fn get_all_pools(&self) -> Vec<PoolInfo> {
get_all_pools()
}
pub fn get_pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
get_pool_detail(slug, self)
}
pub fn get_hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
get_hashrate(time_period, self)
}
pub fn get_difficulty_adjustments(
&self,
time_period: Option<TimePeriod>,
) -> Result<Vec<brk_types::DifficultyAdjustmentEntry>> {
chain::get_difficulty_adjustments(time_period, self)
}
pub fn get_block_fees(&self, time_period: TimePeriod) -> Result<Vec<brk_types::BlockFeesEntry>> {
chain::get_block_fees(time_period, self)
}
pub fn get_block_rewards(
&self,
time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockRewardsEntry>> {
chain::get_block_rewards(time_period, self)
}
pub fn get_block_fee_rates(
&self,
time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeeRatesEntry>> {
chain::get_block_fee_rates(time_period, self)
}
pub fn get_block_sizes_weights(
&self,
time_period: TimePeriod,
) -> Result<brk_types::BlockSizesWeights> {
chain::get_block_sizes_weights(time_period, self)
}
pub fn get_reward_stats(&self, block_count: usize) -> Result<brk_types::RewardStats> {
chain::get_reward_stats(block_count, self)
}
// === Metrics methods ===
pub fn match_metric(&self, metric: &Metric, limit: Limit) -> Vec<&'static str> {
self.vecs().matches(metric, limit)
}
pub fn search_metric_with_index(
&self,
metric: &str,
index: Index,
// params: &Params,
) -> Result<Vec<(String, &&dyn AnyExportableVec)>> {
todo!();
// let all_metrics = &self.vecs.metrics;
// let metrics = &params.metrics;
// let index = params.index;
// let ids_to_vec = self
// .vecs
// .index_to_metric_to_vec
// .get(&index)
// .ok_or(Error::String(format!(
// "Index \"{}\" isn't a valid index",
// index
// )))?;
// metrics
// .iter()
// .map(|metric| {
// let vec = ids_to_vec.get(metric.as_str()).ok_or_else(|| {
// let matches: Vec<&str> = MATCHER.with(|matcher| {
// let matcher = matcher.borrow();
// let mut scored: Vec<(&str, i64)> = all_metrics
// .iter()
// .filter_map(|m| matcher.fuzzy_match(m, metric).map(|s| (*m, s)))
// .collect();
// scored.sort_unstable_by_key(|&(_, s)| std::cmp::Reverse(s));
// scored.into_iter().take(5).map(|(m, _)| m).collect()
// });
// let mut message = format!("No vec \"{metric}\" for index \"{index}\".\n");
// if !matches.is_empty() {
// message += &format!("\nDid you mean: {matches:?}\n");
// }
// Error::String(message)
// });
// vec.map(|vec| (metric.clone(), vec))
// })
// .collect::<Result<Vec<_>>>()
}
fn columns_to_csv(
columns: &[&&dyn AnyExportableVec],
from: Option<i64>,
@@ -336,7 +122,7 @@ impl Query {
pub fn format(
&self,
metrics: Vec<&&dyn AnyExportableVec>,
params: &ParamsOpt,
params: &DataRangeFormat,
) -> Result<Output> {
let from = params.from().map(|from| {
metrics
@@ -381,9 +167,50 @@ impl Query {
})
}
pub fn search_and_format(&self, params: Params) -> Result<Output> {
todo!()
// self.format(self.search(&params)?, &params.rest)
/// Search for vecs matching the given metrics and index
pub fn search(&self, params: &MetricSelection) -> Vec<&'static dyn AnyExportableVec> {
params
.metrics
.iter()
.filter_map(|metric| self.vecs().get(metric, params.index))
.collect()
}
/// Calculate total weight of the vecs for the given range
pub fn weight(vecs: &[&dyn AnyExportableVec], from: Option<i64>, to: Option<i64>) -> usize {
vecs.iter().map(|v| v.range_weight(from, to)).sum()
}
pub fn search_and_format(&self, params: MetricSelection) -> Result<Output> {
let vecs = self.search(&params);
if vecs.is_empty() {
return Ok(Output::default(params.range.format()));
}
self.format(vecs.iter().collect(), &params.range)
}
/// Search and format with weight limit (for DDoS prevention)
pub fn search_and_format_checked(
&self,
params: MetricSelection,
max_weight: usize,
) -> Result<Output> {
let vecs = self.search(&params);
if vecs.is_empty() {
return Ok(Output::default(params.range.format()));
}
let weight = Self::weight(&vecs, params.from(), params.to());
if weight > max_weight {
return Err(Error::String(format!(
"Request too heavy: {weight} bytes exceeds limit of {max_weight} bytes"
)));
}
self.format(vecs.iter().collect(), &params.range)
}
pub fn metric_to_index_to_vec(&self) -> &BTreeMap<&str, IndexToVec<'_>> {
@@ -413,7 +240,7 @@ impl Query {
&self.vecs().indexes
}
pub fn get_metrics(&self, pagination: PaginationParam) -> PaginatedMetrics {
pub fn get_metrics(&self, pagination: Pagination) -> PaginatedMetrics {
self.vecs().metrics(pagination)
}
@@ -421,7 +248,7 @@ impl Query {
self.vecs().catalog()
}
pub fn get_index_to_vecids(&self, paginated_index: PaginatedIndexParam) -> Vec<&str> {
pub fn get_index_to_vecids(&self, paginated_index: PaginationIndex) -> Option<&[&str]> {
self.vecs().index_to_ids(paginated_index)
}
@@ -429,6 +256,8 @@ impl Query {
self.vecs().metric_to_indexes(metric)
}
// === Core accessors ===
#[inline]
pub fn reader(&self) -> &Reader {
&self.0.reader

View File

@@ -1,45 +0,0 @@
use brk_types::Index;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::deser::de_unquote_usize;
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct PaginationParam {
#[schemars(description = "Pagination index")]
#[serde(default, alias = "p", deserialize_with = "de_unquote_usize")]
pub page: Option<usize>,
}
impl PaginationParam {
pub const PER_PAGE: usize = 1_000;
pub fn start(&self, len: usize) -> usize {
(self.page.unwrap_or_default() * Self::PER_PAGE).clamp(0, len)
}
pub fn end(&self, len: usize) -> usize {
((self.page.unwrap_or_default() + 1) * Self::PER_PAGE).clamp(0, len)
}
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct PaginatedIndexParam {
pub index: Index,
#[serde(flatten)]
pub pagination: PaginationParam,
}
/// A paginated list of available metric names (1000 per page)
#[derive(Debug, Serialize, JsonSchema)]
pub struct PaginatedMetrics {
/// Current page number (0-indexed)
#[schemars(example = 0)]
pub current_page: usize,
/// Maximum valid page index (0-indexed)
#[schemars(example = 21000)]
pub max_page: usize,
/// List of metric names (max 1000 per page)
#[schemars(example = ["price_open", "price_close", "realized_price", "..."])]
pub metrics: &'static [&'static str],
}

View File

@@ -1,130 +0,0 @@
use std::ops::Deref;
use brk_types::{Format, Index, Metric, Metrics};
use schemars::JsonSchema;
use serde::Deserialize;
use crate::deser::{de_unquote_i64, de_unquote_usize};
#[derive(Debug, Deserialize, JsonSchema)]
pub struct Params {
/// Requested metrics
#[serde(alias = "m")]
pub metrics: Metrics,
#[serde(alias = "i")]
pub index: Index,
#[serde(flatten)]
pub rest: ParamsOpt,
}
impl Deref for Params {
type Target = ParamsOpt;
fn deref(&self) -> &Self::Target {
&self.rest
}
}
impl From<(Index, Metric, ParamsOpt)> for Params {
#[inline]
fn from((index, metric, rest): (Index, Metric, ParamsOpt)) -> Self {
Self {
index,
metrics: Metrics::from(metric),
rest,
}
}
}
impl From<(Index, Metrics, ParamsOpt)> for Params {
#[inline]
fn from((index, metrics, rest): (Index, Metrics, ParamsOpt)) -> Self {
Self {
index,
metrics,
rest,
}
}
}
#[derive(Default, Debug, Deserialize, JsonSchema)]
pub struct ParamsOpt {
/// Inclusive starting index, if negative will be from the end
#[serde(default, alias = "f", deserialize_with = "de_unquote_i64")]
from: Option<i64>,
/// Exclusive ending index, if negative will be from the end, overrides 'count'
#[serde(default, alias = "t", deserialize_with = "de_unquote_i64")]
to: Option<i64>,
/// Number of values requested
#[serde(default, alias = "c", deserialize_with = "de_unquote_usize")]
count: Option<usize>,
/// Format of the output
#[serde(default)]
format: Format,
}
impl ParamsOpt {
pub fn set_from(mut self, from: i64) -> Self {
self.from.replace(from);
self
}
pub fn set_to(mut self, to: i64) -> Self {
self.to.replace(to);
self
}
pub fn set_count(mut self, count: usize) -> Self {
self.count.replace(count);
self
}
pub fn from(&self) -> Option<i64> {
self.from
}
pub fn to(&self) -> Option<i64> {
if self.to.is_none()
&& let Some(c) = self.count
{
let c = c as i64;
if let Some(f) = self.from {
if f >= 0 || f.abs() > c {
return Some(f + c);
}
} else {
return Some(c);
}
}
self.to
}
pub fn format(&self) -> Format {
self.format
}
}
#[derive(Debug, Deserialize)]
pub struct ParamsDeprec {
#[serde(alias = "i")]
pub index: Index,
#[serde(alias = "v")]
pub ids: Metrics,
#[serde(flatten)]
pub rest: ParamsOpt,
}
impl From<ParamsDeprec> for Params {
#[inline]
fn from(value: ParamsDeprec) -> Self {
Params {
index: value.index,
metrics: value.ids,
rest: value.rest,
}
}
}

View File

@@ -3,13 +3,11 @@ use std::collections::BTreeMap;
use brk_computer::Computer;
use brk_indexer::Indexer;
use brk_traversable::{Traversable, TreeNode};
use brk_types::{Index, IndexInfo, Limit, Metric};
use brk_types::{Index, IndexInfo, Limit, Metric, PaginatedMetrics, Pagination, PaginationIndex};
use derive_deref::{Deref, DerefMut};
use quickmatch::{QuickMatch, QuickMatchConfig};
use vecdb::AnyExportableVec;
use crate::pagination::{PaginatedIndexParam, PaginatedMetrics, PaginationParam};
#[derive(Default)]
pub struct Vecs<'a> {
pub metric_to_index_to_vec: BTreeMap<&'a str, IndexToVec<'a>>,
@@ -18,7 +16,6 @@ pub struct Vecs<'a> {
pub indexes: Vec<IndexInfo>,
pub distinct_metric_count: usize,
pub total_metric_count: usize,
pub longest_metric_len: usize,
catalog: Option<TreeNode>,
matcher: Option<QuickMatch<'a>>,
metric_to_indexes: BTreeMap<&'a str, Vec<Index>>,
@@ -58,12 +55,6 @@ impl<'a> Vecs<'a> {
sort_ids(&mut ids);
this.metrics = ids;
this.longest_metric_len = this
.metrics
.iter()
.map(|s| s.len())
.max()
.unwrap_or_default();
this.distinct_metric_count = this.metric_to_index_to_vec.keys().count();
this.total_metric_count = this
.index_to_metric_to_vec
@@ -107,44 +98,35 @@ impl<'a> Vecs<'a> {
this
}
// Not the most performant or type safe but only built once so that's okay
fn insert(&mut self, vec: &'a dyn AnyExportableVec) {
let name = vec.name();
// dbg!(vec.region_name());
let serialized_index = vec.index_type_to_string();
let index = Index::try_from(serialized_index)
.inspect_err(|_| {
dbg!(&serialized_index);
})
.unwrap();
.unwrap_or_else(|_| panic!("Unknown index type: {serialized_index}"));
let prev = self
.metric_to_index_to_vec
.entry(name)
.or_default()
.insert(index, vec);
if prev.is_some() {
dbg!(serialized_index, name);
panic!()
}
assert!(prev.is_none(), "Duplicate metric: {name} for index {index:?}");
let prev = self
.index_to_metric_to_vec
.entry(index)
.or_default()
.insert(name, vec);
if prev.is_some() {
dbg!(serialized_index, name);
panic!()
}
assert!(prev.is_none(), "Duplicate metric: {name} for index {index:?}");
}
pub fn metrics(&'static self, pagination: PaginationParam) -> PaginatedMetrics {
pub fn metrics(&'static self, pagination: Pagination) -> PaginatedMetrics {
let len = self.metrics.len();
let start = pagination.start(len);
let end = pagination.end(len);
PaginatedMetrics {
current_page: pagination.page.unwrap_or_default(),
max_page: len.div_ceil(PaginationParam::PER_PAGE).saturating_sub(1),
current_page: pagination.page(),
max_page: len.div_ceil(Pagination::PER_PAGE).saturating_sub(1),
metrics: &self.metrics[start..end],
}
}
@@ -156,28 +138,34 @@ impl<'a> Vecs<'a> {
pub fn index_to_ids(
&self,
PaginatedIndexParam { index, pagination }: PaginatedIndexParam,
) -> Vec<&'a str> {
let vec = self.index_to_metrics.get(&index).unwrap();
PaginationIndex { index, pagination }: PaginationIndex,
) -> Option<&[&'a str]> {
let vec = self.index_to_metrics.get(&index)?;
let len = vec.len();
let start = pagination.start(len);
let end = pagination.end(len);
vec.iter().skip(start).take(end).cloned().collect()
Some(&vec[start..end])
}
pub fn catalog(&self) -> &TreeNode {
self.catalog.as_ref().unwrap()
self.catalog.as_ref().expect("catalog not initialized")
}
pub fn matches(&self, metric: &Metric, limit: Limit) -> Vec<&'_ str> {
self.matcher()
self.matcher
.as_ref()
.expect("matcher not initialized")
.matches_with(metric, &QuickMatchConfig::new().with_limit(*limit))
}
fn matcher(&self) -> &QuickMatch<'_> {
self.matcher.as_ref().unwrap()
/// Look up a vec by metric name and index
pub fn get(&self, metric: &Metric, index: Index) -> Option<&'a dyn AnyExportableVec> {
let metric_name = metric.replace("-", "_");
self.metric_to_index_to_vec
.get(metric_name.as_str())
.and_then(|index_to_vec| index_to_vec.get(&index).copied())
}
}