query + server: more endpoints/methods/helpers

This commit is contained in:
nym21
2025-12-14 21:12:10 +01:00
parent b491b1f41f
commit 882a3525af
52 changed files with 1757 additions and 99 deletions

View File

@@ -6,8 +6,9 @@ use brk_indexer::Indexer;
use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_types::{
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, DifficultyAdjustment, Height,
Index, IndexInfo, Limit, MempoolBlock, MempoolInfo, Metric, MetricCount, RecommendedFees,
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, DifficultyAdjustment,
HashrateSummary, Height, Index, IndexInfo, Limit, MempoolBlock, MempoolInfo, Metric,
MetricCount, PoolDetail, PoolInfo, PoolSlug, PoolsSummary, RecommendedFees, TimePeriod,
Timestamp, Transaction, TreeNode, TxOutspend, TxStatus, Txid, TxidPath, Utxo, Vout,
};
use tokio::task::spawn_blocking;
@@ -117,7 +118,11 @@ impl AsyncQuery {
spawn_blocking(move || query.get_block_txids(&hash)).await?
}
pub async fn get_block_txs(&self, hash: String, start_index: usize) -> Result<Vec<Transaction>> {
pub async fn get_block_txs(
&self,
hash: String,
start_index: usize,
) -> Result<Vec<Transaction>> {
let query = self.0.clone();
spawn_blocking(move || query.get_block_txs(&hash, start_index)).await?
}
@@ -152,6 +157,70 @@ impl AsyncQuery {
spawn_blocking(move || query.get_difficulty_adjustment()).await?
}
/// Mining pool stats for `time_period`; runs the blocking query off the async runtime.
pub async fn get_mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_mining_pools(time_period)).await?
}
/// All known mining pools (static data — no blocking work, so no spawn_blocking).
pub async fn get_all_pools(&self) -> Result<Vec<PoolInfo>> {
    Ok(self.0.get_all_pools())
}
/// Detail for one pool identified by `slug`; blocking query moved off the runtime.
pub async fn get_pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_pool_detail(slug)).await?
}
/// Hashrate summary; `None` time period covers the whole chain history.
pub async fn get_hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_hashrate(time_period)).await?
}
/// Historical difficulty adjustments; `None` time period covers all history.
pub async fn get_difficulty_adjustments(
    &self,
    time_period: Option<TimePeriod>,
) -> Result<Vec<brk_types::DifficultyAdjustmentEntry>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_difficulty_adjustments(time_period)).await?
}
/// Sampled average block fees over `time_period`, off the async runtime.
pub async fn get_block_fees(
    &self,
    time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeesEntry>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_fees(time_period)).await?
}
/// Sampled average block rewards over `time_period`, off the async runtime.
pub async fn get_block_rewards(
    &self,
    time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockRewardsEntry>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_rewards(time_period)).await?
}
/// Sampled block fee-rate percentiles over `time_period`, off the async runtime.
pub async fn get_block_fee_rates(
    &self,
    time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeeRatesEntry>> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_fee_rates(time_period)).await?
}
/// Sampled average block sizes and weights over `time_period`, off the async runtime.
pub async fn get_block_sizes_weights(
    &self,
    time_period: TimePeriod,
) -> Result<brk_types::BlockSizesWeights> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_block_sizes_weights(time_period)).await?
}
/// Aggregate reward stats over the last `block_count` blocks, off the async runtime.
pub async fn get_reward_stats(&self, block_count: usize) -> Result<brk_types::RewardStats> {
    let query = self.0.clone();
    spawn_blocking(move || query.get_reward_stats(block_count)).await?
}
pub async fn match_metric(&self, metric: Metric, limit: Limit) -> Result<Vec<&'static str>> {
let query = self.0.clone();
spawn_blocking(move || Ok(query.match_metric(&metric, limit))).await?

View File

@@ -41,13 +41,13 @@ pub fn get_address_txids(
.rev()
.filter(|(key, _): &(AddressIndexTxIndex, Unit)| {
if let Some(after) = after_txindex {
TxIndex::from(key.txindex()) < after
key.txindex() < after
} else {
true
}
})
.take(limit)
.map(|(key, _)| TxIndex::from(key.txindex()))
.map(|(key, _)| key.txindex())
.collect();
let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;

View File

@@ -1,7 +1,7 @@
use brk_error::{Error, Result};
use brk_types::{BlockTimestamp, Date, DateIndex, Height, Timestamp};
use jiff::Timestamp as JiffTimestamp;
use vecdb::{AnyVec, GenericStoredVec, TypedVecIterator};
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
@@ -59,7 +59,12 @@ pub fn get_block_by_timestamp(timestamp: Timestamp, query: &Query) -> Result<Blo
}
let height = Height::from(best_height);
let blockhash = indexer.vecs.block.height_to_blockhash.iter()?.get_unwrap(height);
let blockhash = indexer
.vecs
.block
.height_to_blockhash
.iter()?
.get_unwrap(height);
// Convert timestamp to ISO 8601 format
let ts_secs: i64 = (*best_ts).into();

View File

@@ -0,0 +1,44 @@
use brk_error::Result;
use brk_types::{BlockFeeRatesEntry, FeeRatePercentiles, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Fee-rate percentile entries, one per sampled dateindex, covering roughly
/// the last `time_period` worth of blocks up to the chain tip.
pub fn get_block_fee_rates(
    time_period: TimePeriod,
    query: &Query,
) -> Result<Vec<BlockFeeRatesEntry>> {
    let computer = query.computer();
    let current_height = query.get_height();
    // Window start in block height; saturates at genesis for long periods.
    let start = current_height
        .to_usize()
        .saturating_sub(time_period.block_count());
    let iter = DateIndexIter::new(computer, start, current_height.to_usize());
    let vecs = &computer.chain.indexes_to_fee_rate.dateindex;
    // One sequential iterator per pre-computed percentile series.
    let mut min = vecs.unwrap_min().iter();
    let mut pct10 = vecs.unwrap_pct10().iter();
    let mut pct25 = vecs.unwrap_pct25().iter();
    let mut median = vecs.unwrap_median().iter();
    let mut pct75 = vecs.unwrap_pct75().iter();
    let mut pct90 = vecs.unwrap_pct90().iter();
    let mut max = vecs.unwrap_max().iter();
    Ok(iter.collect(|di, ts, h| {
        Some(BlockFeeRatesEntry {
            avg_height: h.into(),
            // NOTE(review): truncating cast — assumes timestamps fit in u32; confirm Timestamp's width.
            timestamp: *ts as u32,
            // Missing data points fall back to each percentile's default value.
            percentiles: FeeRatePercentiles::new(
                min.get(di).unwrap_or_default(),
                pct10.get(di).unwrap_or_default(),
                pct25.get(di).unwrap_or_default(),
                median.get(di).unwrap_or_default(),
                pct75.get(di).unwrap_or_default(),
                pct90.get(di).unwrap_or_default(),
                max.get(di).unwrap_or_default(),
            ),
        })
    }))
}

View File

@@ -0,0 +1,32 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Average block fees per sampled day over the requested window.
///
/// Samples the pre-computed daily fee averages between the window start and
/// the chain tip; days with no data are skipped.
pub fn get_block_fees(time_period: TimePeriod, query: &Query) -> Result<Vec<BlockFeesEntry>> {
    let computer = query.computer();
    let tip = query.get_height().to_usize();
    let window_start = tip.saturating_sub(time_period.block_count());
    let sampler = DateIndexIter::new(computer, window_start, tip);
    // Daily average fee series (in sats).
    let mut avg_fee_iter = computer
        .chain
        .indexes_to_fee
        .sats
        .dateindex
        .unwrap_average()
        .iter();
    let entries = sampler.collect(|di, ts, height| {
        let fee = avg_fee_iter.get(di)?;
        Some(BlockFeesEntry {
            avg_height: height.into(),
            timestamp: *ts as u32,
            avg_fees: u64::from(*fee),
        })
    });
    Ok(entries)
}

View File

@@ -0,0 +1,33 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Average block rewards per sampled day over the requested window.
///
/// The reward series is the coinbase output (subsidy + fees) averaged per
/// day; days with no data are skipped.
pub fn get_block_rewards(time_period: TimePeriod, query: &Query) -> Result<Vec<BlockRewardsEntry>> {
    let computer = query.computer();
    let tip = query.get_height().to_usize();
    let window_start = tip.saturating_sub(time_period.block_count());
    let sampler = DateIndexIter::new(computer, window_start, tip);
    // coinbase = subsidy + fees
    let mut avg_reward_iter = computer
        .chain
        .indexes_to_coinbase
        .sats
        .dateindex
        .unwrap_average()
        .iter();
    let entries = sampler.collect(|di, ts, height| {
        let reward = avg_reward_iter.get(di)?;
        Some(BlockRewardsEntry {
            avg_height: height.into(),
            timestamp: *ts as u32,
            avg_rewards: u64::from(*reward),
        })
    });
    Ok(entries)
}

View File

@@ -0,0 +1,62 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
/// Average block sizes and weights per sampled day over the requested window.
///
/// Both series are built in a single pass over the sampled dateindexes; a day
/// missing one series still contributes to the other.
pub fn get_block_sizes_weights(
    time_period: TimePeriod,
    query: &Query,
) -> Result<BlockSizesWeights> {
    let computer = query.computer();
    let tip = query.get_height().to_usize();
    let window_start = tip.saturating_sub(time_period.block_count());
    let sampler = DateIndexIter::new(computer, window_start, tip);
    let mut size_iter = computer
        .chain
        .indexes_to_block_size
        .dateindex
        .unwrap_average()
        .iter();
    let mut weight_iter = computer
        .chain
        .indexes_to_block_weight
        .dateindex
        .unwrap_average()
        .iter();
    let mut sizes = Vec::new();
    let mut weights = Vec::new();
    // Push into both result vectors as a side effect; the collected Vec<()>
    // is discarded.
    let _: Vec<()> = sampler.collect(|di, ts, height| {
        if let Some(s) = size_iter.get(di) {
            sizes.push(BlockSizeEntry {
                avg_height: height.into(),
                timestamp: *ts as u32,
                avg_size: u64::from(*s),
            });
        }
        if let Some(w) = weight_iter.get(di) {
            weights.push(BlockWeightEntry {
                avg_height: height.into(),
                timestamp: *ts as u32,
                avg_weight: u64::from(*w),
            });
        }
        None
    });
    Ok(BlockSizesWeights { sizes, weights })
}

View File

@@ -0,0 +1,67 @@
use brk_computer::Computer;
use brk_types::{DateIndex, Height, Timestamp};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
/// Helper for iterating over dateindex ranges with sampling.
///
/// The range is derived from a block-height range: each endpoint height is
/// mapped to its dateindex, and iteration walks `[start_di, end_di]` with a
/// stride chosen so that at most roughly `MAX_SAMPLES` points are produced.
pub struct DateIndexIter<'a> {
    computer: &'a Computer,
    start_di: DateIndex,
    end_di: DateIndex,
    step: usize,
}

impl<'a> DateIndexIter<'a> {
    // Upper bound on sampled data points per response; previously an inline
    // magic number (200).
    const MAX_SAMPLES: usize = 200;

    /// Build a sampler for the dateindex range covered by
    /// `[start_height, end_height]`.
    pub fn new(computer: &'a Computer, start_height: usize, end_height: usize) -> Self {
        // Height -> dateindex lookups fall back to the default dateindex when
        // the height is not indexed yet (best-effort, never errors).
        let start_di = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(start_height))
            .unwrap_or_default();
        let end_di = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(end_height))
            .unwrap_or_default();
        let total = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
        // Stride that keeps the sample count at or below MAX_SAMPLES.
        let step = (total / Self::MAX_SAMPLES).max(1);
        Self {
            computer,
            start_di,
            end_di,
            step,
        }
    }

    /// Iterate and collect entries using the provided transform function.
    ///
    /// `transform` receives each sampled dateindex together with that day's
    /// first timestamp and first block height; returning `None` skips the
    /// point. Days missing either series are skipped silently.
    pub fn collect<T, F>(&self, mut transform: F) -> Vec<T>
    where
        F: FnMut(DateIndex, Timestamp, Height) -> Option<T>,
    {
        let total = self.end_di.to_usize().saturating_sub(self.start_di.to_usize()) + 1;
        let mut timestamps = self
            .computer
            .chain
            .timeindexes_to_timestamp
            .dateindex_extra
            .unwrap_first()
            .iter();
        let mut heights = self.computer.indexes.dateindex_to_first_height.iter();
        let mut entries = Vec::with_capacity(total / self.step + 1);
        let mut i = self.start_di.to_usize();
        while i <= self.end_di.to_usize() {
            let di = DateIndex::from(i);
            if let (Some(ts), Some(h)) = (timestamps.get(di), heights.get(di)) {
                if let Some(entry) = transform(di, ts, h) {
                    entries.push(entry);
                }
            }
            i += self.step;
        }
        entries
    }
}

View File

@@ -0,0 +1,26 @@
use brk_error::Result;
use brk_types::{DifficultyAdjustmentEntry, TimePeriod};
use vecdb::VecIndex;
use crate::Query;
use super::epochs::iter_difficulty_epochs;
/// Get historical difficulty adjustments.
///
/// Entries are returned newest-first. A `None` time period covers the whole
/// chain history.
pub fn get_difficulty_adjustments(
    time_period: Option<TimePeriod>,
    query: &Query,
) -> Result<Vec<DifficultyAdjustmentEntry>> {
    let end = query.get_height().to_usize();
    let start = time_period.map_or(0, |tp| end.saturating_sub(tp.block_count()));
    let mut entries = iter_difficulty_epochs(query.computer(), start, end);
    // Newest first.
    entries.reverse();
    Ok(entries)
}

View File

@@ -0,0 +1,63 @@
use brk_computer::Computer;
use brk_types::{DifficultyAdjustmentEntry, DifficultyEpoch, Height};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
/// Iterate over difficulty epochs within a height range.
///
/// Produces one entry per epoch whose first block is at or above
/// `start_height`. `change_percent` is computed against the previous epoch's
/// difficulty, which is tracked even through skipped epochs so the first
/// included entry has a correct baseline (0.0 when none is known).
pub fn iter_difficulty_epochs(
    computer: &Computer,
    start_height: usize,
    end_height: usize,
) -> Vec<DifficultyAdjustmentEntry> {
    let start_epoch = computer
        .indexes
        .height_to_difficultyepoch
        .read_once(Height::from(start_height))
        .unwrap_or_default();
    let end_epoch = computer
        .indexes
        .height_to_difficultyepoch
        .read_once(Height::from(end_height))
        .unwrap_or_default();
    let mut epoch_to_height_iter = computer.indexes.difficultyepoch_to_first_height.iter();
    let mut epoch_to_timestamp_iter = computer.chain.difficultyepoch_to_timestamp.iter();
    // Last difficulty value within each epoch (presumably the post-adjustment
    // value — `unwrap_last`); used as that epoch's difficulty.
    let mut epoch_to_difficulty_iter = computer
        .chain
        .indexes_to_difficulty
        .difficultyepoch
        .unwrap_last()
        .iter();
    let mut results = Vec::with_capacity(end_epoch.to_usize() - start_epoch.to_usize() + 1);
    let mut prev_difficulty: Option<f64> = None;
    for epoch_usize in start_epoch.to_usize()..=end_epoch.to_usize() {
        let epoch = DifficultyEpoch::from(epoch_usize);
        let epoch_height = epoch_to_height_iter.get(epoch).unwrap_or_default();
        // Skip epochs before our start height but track difficulty
        // so the first included epoch gets a correct change baseline.
        if epoch_height.to_usize() < start_height {
            prev_difficulty = epoch_to_difficulty_iter.get(epoch).map(|d| *d);
            continue;
        }
        let epoch_timestamp = epoch_to_timestamp_iter.get(epoch).unwrap_or_default();
        let epoch_difficulty = *epoch_to_difficulty_iter.get(epoch).unwrap_or_default();
        // Percentage change vs the previous epoch; 0.0 when no baseline exists.
        let change_percent = match prev_difficulty {
            Some(prev) if prev > 0.0 => ((epoch_difficulty / prev) - 1.0) * 100.0,
            _ => 0.0,
        };
        results.push(DifficultyAdjustmentEntry {
            timestamp: epoch_timestamp,
            height: epoch_height,
            difficulty: epoch_difficulty,
            change_percent,
        });
        prev_difficulty = Some(epoch_difficulty);
    }
    results
}

View File

@@ -0,0 +1,99 @@
use brk_error::Result;
use brk_types::{DateIndex, DifficultyEntry, HashrateEntry, HashrateSummary, Height, TimePeriod};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
use super::epochs::iter_difficulty_epochs;
use crate::Query;
/// Get hashrate and difficulty data for a time period.
///
/// Returns sampled daily hashrate points, the difficulty adjustments that
/// fall inside the window, and the current hashrate/difficulty at the chain
/// tip. A `None` time period covers the whole chain history.
pub fn get_hashrate(time_period: Option<TimePeriod>, query: &Query) -> Result<HashrateSummary> {
    let indexer = query.indexer();
    let computer = query.computer();
    let current_height = query.get_height();
    // Get current difficulty
    let current_difficulty = *indexer
        .vecs
        .block
        .height_to_difficulty
        .read_once(current_height)?;
    // Get current hashrate
    let current_dateindex = computer
        .indexes
        .height_to_dateindex
        .read_once(current_height)?;
    let current_hashrate = *computer
        .chain
        .indexes_to_hash_rate
        .dateindex
        .unwrap_last()
        .read_once(current_dateindex)? as u128;
    // Calculate start height based on time period
    let end = current_height.to_usize();
    let start = match time_period {
        Some(tp) => end.saturating_sub(tp.block_count()),
        None => 0,
    };
    // Get hashrate entries using iterators for efficiency
    let start_dateindex = computer
        .indexes
        .height_to_dateindex
        .read_once(Height::from(start))?;
    let end_dateindex = current_dateindex;
    // Sample at regular intervals to avoid too many data points
    let total_days = end_dateindex
        .to_usize()
        .saturating_sub(start_dateindex.to_usize())
        + 1;
    // NOTE(review): this sampling logic duplicates DateIndexIter; consider
    // reusing it here.
    let step = (total_days / 200).max(1); // Max ~200 data points
    // Create iterators for the loop
    let mut hashrate_iter = computer
        .chain
        .indexes_to_hash_rate
        .dateindex
        .unwrap_last()
        .iter();
    let mut timestamp_iter = computer
        .chain
        .timeindexes_to_timestamp
        .dateindex_extra
        .unwrap_first()
        .iter();
    let mut hashrates = Vec::with_capacity(total_days / step + 1);
    let mut di = start_dateindex.to_usize();
    while di <= end_dateindex.to_usize() {
        let dateindex = DateIndex::from(di);
        // Days with missing data are skipped rather than failing the request.
        if let (Some(hr), Some(timestamp)) =
            (hashrate_iter.get(dateindex), timestamp_iter.get(dateindex))
        {
            hashrates.push(HashrateEntry {
                timestamp,
                avg_hashrate: (*hr) as u128,
            });
        }
        di += step;
    }
    // Get difficulty adjustments within the period
    let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
        .into_iter()
        .map(|e| DifficultyEntry {
            timestamp: e.timestamp,
            difficulty: e.difficulty,
            height: e.height,
        })
        .collect();
    Ok(HashrateSummary {
        hashrates,
        difficulty,
        current_hashrate,
        current_difficulty,
    })
}

View File

@@ -1,3 +1,21 @@
// Endpoint-family submodules for chain queries.
mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes_weights;
mod dateindex_iter;
mod difficulty;
mod difficulty_adjustments;
mod epochs;
mod hashrate;
mod pools;
mod reward_stats;

// Re-export the public query functions; `dateindex_iter` and `epochs` are
// internal helpers and are deliberately not re-exported.
pub use block_fee_rates::*;
pub use block_fees::*;
pub use block_rewards::*;
pub use block_sizes_weights::*;
pub use difficulty::*;
pub use difficulty_adjustments::*;
pub use hashrate::*;
pub use pools::*;
pub use reward_stats::*;

View File

@@ -0,0 +1,172 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, GenericStoredVec, IterableVec, VecIndex};
use crate::Query;
/// Get mining pool statistics for a time period using pre-computed cumulative counts.
///
/// Per-pool block counts over the window are derived as
/// `cumulative(end) - cumulative(start - 1)`, so no per-block scan is needed.
pub fn get_mining_pools(time_period: TimePeriod, query: &Query) -> Result<PoolsSummary> {
    let computer = query.computer();
    let current_height = query.get_height();
    let end = current_height.to_usize();
    // No blocks indexed yet
    if computer.pools.height_to_pool.len() == 0 {
        return Ok(PoolsSummary {
            pools: vec![],
            block_count: 0,
            last_estimated_hashrate: 0,
        });
    }
    // Calculate start height based on time period
    let start = end.saturating_sub(time_period.block_count());
    let pools = pools();
    let mut pool_data: Vec<(&'static brk_types::Pool, u32)> = Vec::new();
    // For each pool, get cumulative count at end and start, subtract to get range count
    for (pool_id, pool_vecs) in &computer.pools.vecs {
        let mut cumulative = pool_vecs
            .indexes_to_blocks_mined
            .height_extra
            .unwrap_cumulative()
            .iter();
        let count_at_end: u32 = *cumulative.get(current_height).unwrap_or_default();
        // Cumulative count strictly before the window (at height start - 1).
        let count_at_start: u32 = if start == 0 {
            0
        } else {
            *cumulative.get(Height::from(start - 1)).unwrap_or_default()
        };
        let block_count = count_at_end.saturating_sub(count_at_start);
        // Only include pools that mined at least one block in the period
        if block_count > 0 {
            pool_data.push((pools.get(*pool_id), block_count));
        }
    }
    // Sort by block count descending
    // NOTE(review): pools with equal counts keep the map's iteration order;
    // if `computer.pools.vecs` is unordered, tie ranking may vary per run.
    pool_data.sort_by(|a, b| b.1.cmp(&a.1));
    let total_blocks: u32 = pool_data.iter().map(|(_, count)| count).sum();
    // Build stats with ranks
    let pool_stats: Vec<PoolStats> = pool_data
        .into_iter()
        .enumerate()
        .map(|(idx, (pool, block_count))| {
            // Share of the window's blocks; 0.0 when the window is empty.
            let share = if total_blocks > 0 {
                block_count as f64 / total_blocks as f64
            } else {
                0.0
            };
            PoolStats::new(pool, block_count, (idx + 1) as u32, share)
        })
        .collect();
    // TODO: Calculate actual hashrate from difficulty
    let last_estimated_hashrate = 0u128;
    Ok(PoolsSummary {
        pools: pool_stats,
        block_count: total_blocks,
        last_estimated_hashrate,
    })
}
/// Get list of all known mining pools (no statistics).
pub fn get_all_pools() -> Vec<PoolInfo> {
    let mut infos = Vec::new();
    for pool in pools().iter() {
        infos.push(PoolInfo::from(pool));
    }
    infos
}
/// Get detailed information about a specific pool by slug.
///
/// Returns block counts and network shares for all time, the last day
/// (144 blocks) and the last week (1008 blocks).
pub fn get_pool_detail(slug: PoolSlug, query: &Query) -> Result<PoolDetail> {
    // ~1 day / ~1 week of blocks at the 10-minute target spacing.
    const BLOCKS_PER_DAY: usize = 144;
    const BLOCKS_PER_WEEK: usize = 1008;

    let computer = query.computer();
    let current_height = query.get_height();
    let end = current_height.to_usize();
    let pools_list = pools();
    let pool = pools_list.get(slug);
    // Per-pool pre-computed vectors; absent if the pool was never indexed.
    let pool_vecs = computer
        .pools
        .vecs
        .get(&slug)
        .ok_or_else(|| Error::Str("Pool data not found"))?;
    let mut cumulative = pool_vecs
        .indexes_to_blocks_mined
        .height_extra
        .unwrap_cumulative()
        .iter();
    // Total blocks mined by this pool over all time.
    let total_all: u32 = *cumulative.get(current_height).unwrap_or_default();
    // Cumulative count strictly before `start` (i.e. at height start - 1);
    // shared by the 24h and 1w windows instead of duplicating the lookup.
    let mut count_before = |start: usize| -> u32 {
        if start == 0 {
            0
        } else {
            *cumulative.get(Height::from(start - 1)).unwrap_or_default()
        }
    };
    let start_24h = end.saturating_sub(BLOCKS_PER_DAY);
    let total_24h = total_all.saturating_sub(count_before(start_24h));
    let start_1w = end.saturating_sub(BLOCKS_PER_WEEK);
    let total_1w = total_all.saturating_sub(count_before(start_1w));
    // Network-wide block counts per window (heights are 0-based, hence +1).
    let network_blocks_all = (end + 1) as u32;
    let network_blocks_24h = (end - start_24h + 1) as u32;
    let network_blocks_1w = (end - start_1w + 1) as u32;
    // Pool's share of the network's blocks; guards an empty window.
    let share = |mined: u32, network: u32| -> f64 {
        if network > 0 {
            mined as f64 / network as f64
        } else {
            0.0
        }
    };
    Ok(PoolDetail {
        pool: PoolDetailInfo::from(pool),
        block_count: PoolBlockCounts {
            all: total_all,
            day: total_24h,
            week: total_1w,
        },
        block_share: PoolBlockShares {
            all: share(total_all, network_blocks_all),
            day: share(total_24h, network_blocks_24h),
            week: share(total_1w, network_blocks_1w),
        },
        estimated_hashrate: 0, // TODO: Calculate from share and network hashrate
        reported_hashrate: None,
    })
}

View File

@@ -0,0 +1,58 @@
use brk_error::Result;
use brk_types::{Height, RewardStats, Sats};
use vecdb::{IterableVec, VecIndex};
use crate::Query;
/// Aggregate reward statistics (coinbase, fees, tx count) over the last
/// `block_count` blocks ending at the chain tip.
///
/// `block_count` is effectively clamped to at least 1: the window always
/// includes the tip block.
pub fn get_reward_stats(block_count: usize, query: &Query) -> Result<RewardStats> {
    let computer = query.computer();
    let current_height = query.get_height();
    let end_block = current_height;
    // `block_count.saturating_sub(1)` guards the underflow that
    // `block_count - 1` caused for block_count == 0 (panic in debug builds,
    // wrap to usize::MAX in release).
    let start_block = Height::from(
        current_height
            .to_usize()
            .saturating_sub(block_count.saturating_sub(1)),
    );
    let mut coinbase_iter = computer
        .chain
        .indexes_to_coinbase
        .sats
        .height
        .as_ref()
        .expect("height-indexed coinbase vec must exist")
        .iter();
    let mut fee_iter = computer.chain.indexes_to_fee.sats.height.unwrap_sum().iter();
    let mut tx_count_iter = computer
        .chain
        .indexes_to_tx_count
        .height
        .as_ref()
        .expect("height-indexed tx-count vec must exist")
        .iter();
    let mut total_reward = Sats::ZERO;
    let mut total_fee = Sats::ZERO;
    let mut total_tx: u64 = 0;
    // Missing entries are skipped rather than failing the whole request.
    for height in start_block.to_usize()..=end_block.to_usize() {
        let h = Height::from(height);
        if let Some(coinbase) = coinbase_iter.get(h) {
            total_reward += Sats::from(u64::from(*coinbase));
        }
        if let Some(fee) = fee_iter.get(h) {
            total_fee += Sats::from(u64::from(*fee));
        }
        if let Some(tx_count) = tx_count_iter.get(h) {
            total_tx += u64::from(*tx_count);
        }
    }
    Ok(RewardStats {
        start_block,
        end_block,
        total_reward,
        total_fee,
        total_tx,
    })
}

View File

@@ -47,7 +47,10 @@ pub fn get_tx_outspend(
// Look up spend status
let computer = query.computer();
let txinindex = computer.stateful.txoutindex_to_txinindex.read_once(txoutindex)?;
let txinindex = computer
.stateful
.txoutindex_to_txinindex
.read_once(txoutindex)?;
if txinindex == TxInIndex::UNSPENT {
return Ok(TxOutspend::UNSPENT);
@@ -119,10 +122,13 @@ pub fn get_tx_outspends(TxidPath { txid }: TxidPath, query: &Query) -> Result<Ve
/// Get spending transaction details from a txinindex
fn get_outspend_details(txinindex: TxInIndex, query: &Query) -> Result<TxOutspend> {
let indexer = query.indexer();
let computer = query.computer();
// Look up spending txindex directly
let spending_txindex = computer.indexes.txinindex_to_txindex.read_once(txinindex)?;
let spending_txindex = indexer
.vecs
.txin
.txinindex_to_txindex
.read_once(txinindex)?;
// Calculate vin
let spending_first_txinindex = indexer
@@ -133,8 +139,16 @@ fn get_outspend_details(txinindex: TxInIndex, query: &Query) -> Result<TxOutspen
let vin = Vin::from(usize::from(txinindex) - usize::from(spending_first_txinindex));
// Get spending tx details
let spending_txid = indexer.vecs.tx.txindex_to_txid.read_once(spending_txindex)?;
let spending_height = indexer.vecs.tx.txindex_to_height.read_once(spending_txindex)?;
let spending_txid = indexer
.vecs
.tx
.txindex_to_txid
.read_once(spending_txindex)?;
let spending_height = indexer
.vecs
.tx
.txindex_to_height
.read_once(spending_txindex)?;
let block_hash = indexer
.vecs
.block

View File

@@ -10,9 +10,10 @@ use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_traversable::TreeNode;
use brk_types::{
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, Format, Height, Index,
IndexInfo, Limit, MempoolInfo, Metric, MetricCount, RecommendedFees, Timestamp, Transaction,
TxOutspend, TxStatus, Txid, TxidPath, Utxo, Vout,
Address, AddressStats, BlockInfo, BlockStatus, BlockTimestamp, Format, HashrateSummary,
Height, Index, IndexInfo, Limit, MempoolInfo, Metric, MetricCount, PoolDetail, PoolInfo,
PoolSlug, PoolsSummary, RecommendedFees, TimePeriod, Timestamp, Transaction, TxOutspend,
TxStatus, Txid, TxidPath, Utxo, Vout,
};
use vecdb::{AnyExportableVec, AnyStoredVec};
@@ -37,11 +38,12 @@ pub use crate::chain::validate_address;
use crate::{
chain::{
get_address, get_address_mempool_txids, get_address_txids, get_address_utxos,
get_block_by_height, get_block_by_timestamp, get_block_raw, get_block_status_by_height,
get_block_txid_at_index, get_block_txids, get_block_txs, get_blocks,
get_difficulty_adjustment, get_height_by_hash, get_mempool_blocks, get_mempool_info,
get_mempool_txids, get_recommended_fees, get_transaction, get_transaction_hex,
get_transaction_status, get_tx_outspend, get_tx_outspends,
get_all_pools, get_block_by_height, get_block_by_timestamp, get_block_raw,
get_block_status_by_height, get_block_txid_at_index, get_block_txids, get_block_txs,
get_blocks, get_difficulty_adjustment, get_hashrate, get_height_by_hash,
get_mempool_blocks, get_mempool_info, get_mempool_txids, get_mining_pools, get_pool_detail,
get_recommended_fees, get_transaction, get_transaction_hex, get_transaction_status,
get_tx_outspend, get_tx_outspends,
},
vecs::{IndexToVec, MetricToVec},
};
@@ -184,6 +186,58 @@ impl Query {
get_difficulty_adjustment(self)
}
/// Mining pool stats over `time_period`; thin wrapper around the chain helper.
pub fn get_mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
    get_mining_pools(time_period, self)
}
/// Static list of all known pools; infallible, touches no chain data.
pub fn get_all_pools(&self) -> Vec<PoolInfo> {
    get_all_pools()
}
/// Detail for one pool identified by `slug`; errors if the pool is unknown.
pub fn get_pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
    get_pool_detail(slug, self)
}
/// Hashrate summary; `None` time period covers the whole chain history.
pub fn get_hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
    get_hashrate(time_period, self)
}
/// Historical difficulty adjustments, newest first.
// NOTE(review): mixes `chain::`-qualified calls and fully-qualified
// `brk_types::` return types with the imported forms used above — worth
// unifying the style.
pub fn get_difficulty_adjustments(
    &self,
    time_period: Option<TimePeriod>,
) -> Result<Vec<brk_types::DifficultyAdjustmentEntry>> {
    chain::get_difficulty_adjustments(time_period, self)
}
/// Sampled average block fees over `time_period`.
pub fn get_block_fees(&self, time_period: TimePeriod) -> Result<Vec<brk_types::BlockFeesEntry>> {
    chain::get_block_fees(time_period, self)
}
/// Sampled average block rewards (subsidy + fees) over `time_period`.
pub fn get_block_rewards(
    &self,
    time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockRewardsEntry>> {
    chain::get_block_rewards(time_period, self)
}
/// Sampled block fee-rate percentiles over `time_period`.
pub fn get_block_fee_rates(
    &self,
    time_period: TimePeriod,
) -> Result<Vec<brk_types::BlockFeeRatesEntry>> {
    chain::get_block_fee_rates(time_period, self)
}
/// Sampled average block sizes and weights over `time_period`.
pub fn get_block_sizes_weights(
    &self,
    time_period: TimePeriod,
) -> Result<brk_types::BlockSizesWeights> {
    chain::get_block_sizes_weights(time_period, self)
}
/// Aggregate reward stats over the last `block_count` blocks up to the tip.
pub fn get_reward_stats(&self, block_count: usize) -> Result<brk_types::RewardStats> {
    chain::get_reward_stats(block_count, self)
}
pub fn match_metric(&self, metric: &Metric, limit: Limit) -> Vec<&'static str> {
self.vecs().matches(metric, limit)
}