global: snapshot

This commit is contained in:
nym21
2026-03-31 22:53:25 +02:00
parent d038141a8a
commit ae26db6df2
83 changed files with 3398 additions and 710 deletions

View File

@@ -1,12 +1,17 @@
use bitcoin::consensus::Decodable;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_types::{
BlockExtras, BlockHash, BlockHashPrefix, BlockInfo, BlockPool, Height, TxIndex, pools,
BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1, BlockPool,
FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::Query;
const DEFAULT_BLOCK_COUNT: u32 = 10;
const DEFAULT_V1_BLOCK_COUNT: u32 = 15;
const HEADER_SIZE: usize = 80;
impl Query {
pub fn block(&self, hash: &BlockHash) -> Result<BlockInfo> {
@@ -15,58 +20,70 @@ impl Query {
}
pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> {
let indexer = self.indexer();
let max_height = self.max_height();
if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
self.blocks_range(height.to_usize(), height.to_usize() + 1)?
.pop()
.ok_or(Error::NotFound("Block not found".into()))
}
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?;
let difficulty = indexer.vecs.blocks.difficulty.collect_one(height).unwrap();
let timestamp = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();
let size = indexer.vecs.blocks.total.collect_one(height).unwrap();
let weight = indexer.vecs.blocks.weight.collect_one(height).unwrap();
let tx_count = self.tx_count_at_height(height, max_height)?;
/// Single v1 block lookup by height.
///
/// Validates the height against the indexed tip, then delegates to the bulk
/// `blocks_v1_range` path so single- and multi-block queries share one code
/// path (and one set of batched reads).
pub fn block_by_height_v1(&self, height: Height) -> Result<BlockInfoV1> {
    let max_height = self.max_height();
    if height > max_height {
        return Err(Error::OutOfRange("Block height out of range".into()));
    }
    // Range of exactly one block; `pop` yields it (or None if absent).
    self.blocks_v1_range(height.to_usize(), height.to_usize() + 1)?
        .pop()
        .ok_or(Error::NotFound("Block not found".into()))
}
Ok(BlockInfo {
id: blockhash,
height,
tx_count,
size: *size,
weight,
timestamp,
difficulty: *difficulty,
})
/// Consensus-serialized 80-byte block header as a hex string, looked up by
/// block hash (hash -> height -> on-disk header bytes).
pub fn block_header_hex(&self, hash: &BlockHash) -> Result<String> {
    let height = self.height_by_hash(hash)?;
    let header = self.read_block_header(height)?;
    Ok(bitcoin::consensus::encode::serialize_hex(&header))
}
/// Block hash at `height`, with an explicit out-of-range check so the
/// underlying `read_once` is only reached for valid heights.
pub fn block_hash_by_height(&self, height: Height) -> Result<BlockHash> {
    let max_height = self.max_height();
    if height > max_height {
        return Err(Error::OutOfRange("Block height out of range".into()));
    }
    Ok(self.indexer().vecs.blocks.blockhash.read_once(height)?)
}
pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
let max_height = self.indexed_height();
let (begin, end) = self.resolve_block_range(start_height, DEFAULT_BLOCK_COUNT);
self.blocks_range(begin, end)
}
let start = start_height.unwrap_or(max_height);
let start = start.min(max_height);
pub fn blocks_v1(&self, start_height: Option<Height>) -> Result<Vec<BlockInfoV1>> {
let (begin, end) = self.resolve_block_range(start_height, DEFAULT_V1_BLOCK_COUNT);
self.blocks_v1_range(begin, end)
}
let start_u32: u32 = start.into();
let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1) as usize;
// === Range queries (bulk reads) ===
if count == 0 {
fn blocks_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfo>> {
if begin >= end {
return Ok(Vec::new());
}
let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader();
// Batch-read all PcoVec data for the contiguous range (avoids
// per-block page decompression — 4 reads instead of 4*count).
let end = start_u32 as usize + 1;
let begin = end - count;
// Bulk read all indexed data
let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
// Batch-read first_tx_index for tx_count computation (need one extra for next boundary)
// Bulk read tx indexes for tx_count
let max_height = self.indexed_height();
let tx_index_end = if end <= max_height.to_usize() {
end + 1
} else {
@@ -79,24 +96,39 @@ impl Query {
.collect_range_at(begin, tx_index_end);
let total_txs = computer.indexes.tx_index.identity.len();
// Bulk read median time window
let median_start = begin.saturating_sub(10);
let median_timestamps: Vec<Timestamp> = indexer
.vecs
.blocks
.timestamp
.collect_range_at(median_start, end);
let count = end - begin;
let mut blocks = Vec::with_capacity(count);
for i in (0..count).rev() {
let height = Height::from(begin + i);
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?;
let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
let header = Self::decode_header(&raw_header)?;
let tx_count = if i + 1 < first_tx_indexes.len() {
first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()
(first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
} else {
total_txs - first_tx_indexes[i].to_usize()
(total_txs - first_tx_indexes[i].to_usize()) as u32
};
let median_time =
Self::compute_median_time(&median_timestamps, begin + i, median_start);
blocks.push(BlockInfo {
id: blockhash,
height,
tx_count: tx_count as u32,
id: blockhashes[i].clone(),
height: Height::from(begin + i),
header,
timestamp: timestamps[i],
tx_count,
size: *sizes[i],
weight: weights[i],
timestamp: timestamps[i],
median_time,
difficulty: *difficulties[i],
});
}
@@ -104,13 +136,254 @@ impl Query {
Ok(blocks)
}
/// Builds `BlockInfoV1` (core block info plus mempool.space-style "extras")
/// for every height in the half-open range `[begin, end)`, newest first.
///
/// Every per-height column is batch-read once for the whole range, so the
/// cost is a fixed number of range reads rather than O(columns × blocks).
///
/// NOTE(review): assumes `[begin, end)` lies within the indexed range —
/// callers clamp against the tip before calling (`resolve_block_range`);
/// out-of-range indices would panic on the slice accesses below.
pub(crate) fn blocks_v1_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfoV1>> {
    if begin >= end {
        return Ok(Vec::new());
    }
    let count = end - begin;
    let indexer = self.indexer();
    let computer = self.computer();
    let reader = self.reader();
    let all_pools = pools();
    // Bulk read all indexed data
    let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
    let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
    let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
    let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
    let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
    let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
    let pool_slugs = computer.pools.pool.collect_range_at(begin, end);
    // Bulk read tx indexes. Read one extra entry (the next block's
    // first_tx_index) when it exists, so per-block tx counts can be taken
    // as differences; the tip block falls back to the total tx count.
    let max_height = self.indexed_height();
    let tx_index_end = if end <= max_height.to_usize() {
        end + 1
    } else {
        end
    };
    let first_tx_indexes: Vec<TxIndex> = indexer
        .vecs
        .transactions
        .first_tx_index
        .collect_range_at(begin, tx_index_end);
    let total_txs = computer.indexes.tx_index.identity.len();
    // Bulk read segwit stats
    let segwit_txs = indexer.vecs.blocks.segwit_txs.collect_range_at(begin, end);
    let segwit_sizes = indexer.vecs.blocks.segwit_size.collect_range_at(begin, end);
    let segwit_weights = indexer
        .vecs
        .blocks
        .segwit_weight
        .collect_range_at(begin, end);
    // Bulk read extras data
    let fee_sats = computer
        .mining
        .rewards
        .fees
        .block
        .sats
        .collect_range_at(begin, end);
    let subsidy_sats = computer
        .mining
        .rewards
        .subsidy
        .block
        .sats
        .collect_range_at(begin, end);
    let input_counts = computer.inputs.count.sum.collect_range_at(begin, end);
    let output_counts = computer
        .outputs
        .count
        .total
        .sum
        .collect_range_at(begin, end);
    let utxo_set_sizes = computer
        .outputs
        .count
        .unspent
        .height
        .collect_range_at(begin, end);
    let input_volumes = computer
        .transactions
        .volume
        .transfer_volume
        .block
        .sats
        .collect_range_at(begin, end);
    let output_volumes = computer
        .mining
        .rewards
        .output_volume
        .collect_range_at(begin, end);
    // Bulk read effective fee rate distribution (accounts for CPFP)
    let frd = &computer
        .transactions
        .fees
        .effective_fee_rate
        .distribution
        .block;
    let fr_min = frd.min.height.collect_range_at(begin, end);
    let fr_pct10 = frd.pct10.height.collect_range_at(begin, end);
    let fr_pct25 = frd.pct25.height.collect_range_at(begin, end);
    let fr_median = frd.median.height.collect_range_at(begin, end);
    let fr_pct75 = frd.pct75.height.collect_range_at(begin, end);
    let fr_pct90 = frd.pct90.height.collect_range_at(begin, end);
    let fr_max = frd.max.height.collect_range_at(begin, end);
    // Bulk read fee amount distribution (sats)
    let fad = &computer.transactions.fees.fee.distribution.block;
    let fa_min = fad.min.height.collect_range_at(begin, end);
    let fa_pct10 = fad.pct10.height.collect_range_at(begin, end);
    let fa_pct25 = fad.pct25.height.collect_range_at(begin, end);
    let fa_median = fad.median.height.collect_range_at(begin, end);
    let fa_pct75 = fad.pct75.height.collect_range_at(begin, end);
    let fa_pct90 = fad.pct90.height.collect_range_at(begin, end);
    let fa_max = fad.max.height.collect_range_at(begin, end);
    // Bulk read tx positions range covering all coinbase txs (first tx of each block)
    let tx_pos_begin = first_tx_indexes[0].to_usize();
    let tx_pos_end = first_tx_indexes[count - 1].to_usize() + 1;
    let all_tx_positions = indexer
        .vecs
        .transactions
        .position
        .collect_range_at(tx_pos_begin, tx_pos_end);
    // Bulk read median time window (10 extra blocks before `begin` so each
    // block's up-to-11-block median window is fully covered).
    let median_start = begin.saturating_sub(10);
    let median_timestamps = indexer
        .vecs
        .blocks
        .timestamp
        .collect_range_at(median_start, end);
    let mut blocks = Vec::with_capacity(count);
    // Reverse iteration: results are returned newest-first.
    for i in (0..count).rev() {
        // Header bytes live at the block's on-disk position (first 80 bytes).
        let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
        let header = Self::decode_header(&raw_header)?;
        // tx_count = next block's first tx index − this block's; the last
        // readable block uses the global tx count as the upper bound.
        let tx_count = if i + 1 < first_tx_indexes.len() {
            (first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
        } else {
            (total_txs - first_tx_indexes[i].to_usize()) as u32
        };
        let weight = weights[i];
        let size = *sizes[i];
        let total_fees = fee_sats[i];
        let subsidy = subsidy_sats[i];
        // Presumably the −1 excludes the coinbase's single input — TODO confirm.
        let total_inputs = (*input_counts[i]).saturating_sub(1);
        let total_outputs = *output_counts[i];
        let vsize = weight.to_vbytes_ceil();
        let total_fees_u64 = u64::from(total_fees);
        // Fee averages exclude the (fee-less) coinbase transaction.
        let non_coinbase = tx_count.saturating_sub(1) as u64;
        let pool_slug = pool_slugs[i];
        let pool = all_pools.get(pool_slug);
        let (
            coinbase_raw,
            coinbase_address,
            coinbase_addresses,
            coinbase_signature,
            coinbase_signature_ascii,
        ) = Self::parse_coinbase_tx(
            reader,
            // Coinbase = first tx of the block; index into the batched
            // position read, offset by the range's starting tx index.
            all_tx_positions[first_tx_indexes[i].to_usize() - tx_pos_begin],
        );
        let median_time =
            Self::compute_median_time(&median_timestamps, begin + i, median_start);
        let info = BlockInfo {
            id: blockhashes[i].clone(),
            height: Height::from(begin + i),
            header,
            timestamp: timestamps[i],
            tx_count,
            size,
            weight,
            median_time,
            difficulty: *difficulties[i],
        };
        let total_input_amt = input_volumes[i];
        let total_output_amt = output_volumes[i];
        let extras = BlockExtras {
            total_fees,
            median_fee: fr_median[i],
            // Seven-point effective-fee-rate distribution (min..max).
            fee_range: [
                fr_min[i],
                fr_pct10[i],
                fr_pct25[i],
                fr_median[i],
                fr_pct75[i],
                fr_pct90[i],
                fr_max[i],
            ],
            reward: subsidy + total_fees,
            pool: BlockPool {
                id: pool.unique_id(),
                name: pool.name.to_string(),
                slug: pool_slug,
            },
            avg_fee: Sats::from(if non_coinbase > 0 {
                total_fees_u64 / non_coinbase
            } else {
                0
            }),
            avg_fee_rate: FeeRate::from((total_fees, VSize::from(vsize))),
            coinbase_raw,
            coinbase_address,
            coinbase_addresses,
            coinbase_signature,
            coinbase_signature_ascii,
            avg_tx_size: if tx_count > 0 {
                size as f64 / tx_count as f64
            } else {
                0.0
            },
            total_inputs,
            total_outputs,
            total_output_amt,
            median_fee_amt: fa_median[i],
            // Seven-point absolute-fee (sats) distribution.
            fee_percentiles: [
                fa_min[i],
                fa_pct10[i],
                fa_pct25[i],
                fa_median[i],
                fa_pct75[i],
                fa_pct90[i],
                fa_max[i],
            ],
            segwit_total_txs: *segwit_txs[i],
            segwit_total_size: *segwit_sizes[i],
            segwit_total_weight: segwit_weights[i],
            // Unlike `info.header` (decoded), this is the raw header hex.
            header: raw_header.to_lower_hex_string(),
            utxo_set_change: total_outputs as i64 - total_inputs as i64,
            utxo_set_size: *utxo_set_sizes[i],
            total_input_amt,
            virtual_size: vsize as f64,
        };
        blocks.push(BlockInfoV1 { info, extras });
    }
    Ok(blocks)
}
// === Helper methods ===
pub fn height_by_hash(&self, hash: &BlockHash) -> Result<Height> {
let indexer = self.indexer();
let prefix = BlockHashPrefix::from(hash);
indexer
.stores
.blockhash_prefix_to_height
@@ -119,31 +392,103 @@ impl Query {
.ok_or(Error::NotFound("Block not found".into()))
}
/// Decodes the 80-byte consensus block header for `height` straight from its
/// on-disk position.
///
/// NOTE(review): `collect_one(height).unwrap()` panics for out-of-range
/// heights — callers are expected to validate the height first (as
/// `block_hash_by_height` does); confirm all call sites do.
pub fn read_block_header(&self, height: Height) -> Result<bitcoin::block::Header> {
    let position = self
        .indexer()
        .vecs
        .blocks
        .position
        .collect_one(height)
        .unwrap();
    let raw = self.reader().read_raw_bytes(position, HEADER_SIZE)?;
    bitcoin::block::Header::consensus_decode(&mut raw.as_slice())
        .map_err(|_| Error::Internal("Failed to decode block header"))
}
/// Highest indexed block height: the blockhash column's length minus one
/// (saturating, so an empty index maps to height 0).
fn max_height(&self) -> Height {
    let block_count = self.indexer().vecs.blocks.blockhash.len();
    Height::from(block_count.saturating_sub(1))
}
fn tx_count_at_height(&self, height: Height, max_height: Height) -> Result<u32> {
let indexer = self.indexer();
let computer = self.computer();
/// Translate an optional starting height into a half-open `[begin, end)`
/// index range covering at most `count` blocks ending at that height.
///
/// A missing or too-large start is clamped to the chain tip; the count is
/// clamped so the range never reaches below genesis.
fn resolve_block_range(&self, start_height: Option<Height>, count: u32) -> (usize, usize) {
    let tip = self.height();
    let anchor: u32 = start_height.unwrap_or(tip).min(tip).into();
    // Only `anchor + 1` blocks exist at or below the anchor height.
    let take = count.min(anchor + 1) as usize;
    let end = anchor as usize + 1;
    (end - take, end)
}
let first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height)
.unwrap();
let next_first_tx_index = if height < max_height {
indexer
.vecs
.transactions
.first_tx_index
.collect_one(height.incremented())
.unwrap()
} else {
TxIndex::from(computer.indexes.tx_index.identity.len())
fn decode_header(bytes: &[u8]) -> Result<BlockHeader> {
let raw = bitcoin::block::Header::consensus_decode(&mut &bytes[..])
.map_err(|_| Error::Internal("Failed to decode block header"))?;
Ok(BlockHeader::from(raw))
}
/// Median timestamp over the up-to-11-block window ending at `height`,
/// computed from a pre-fetched timestamp slice.
///
/// `all_timestamps` holds timestamps for heights starting at `window_start`;
/// `height` must fall inside that span. The window is `[height - 10, height]`
/// clamped at genesis (11 blocks once the chain is long enough).
/// NOTE(review): Bitcoin Core's median-time-past for validation uses the 11
/// blocks *preceding* a block; this window includes the block itself —
/// confirm which convention the API consumers expect.
fn compute_median_time(
    all_timestamps: &[Timestamp],
    height: usize,
    window_start: usize,
) -> Timestamp {
    // Saturating arithmetic: callers pass window_start = begin.saturating_sub(10)
    // with begin <= height, so these cannot underflow for in-range calls, but a
    // stray caller must not be able to trigger a subtraction panic here.
    let rel_start = height.saturating_sub(10).saturating_sub(window_start);
    let rel_end = (height + 1)
        .saturating_sub(window_start)
        .min(all_timestamps.len());
    // Defensive: a degenerate window (mis-sized slice or bad bounds) yields
    // the zero timestamp instead of an out-of-bounds panic.
    if rel_start >= rel_end {
        return Timestamp::from(0usize);
    }
    let mut sorted: Vec<usize> = all_timestamps[rel_start..rel_end]
        .iter()
        .map(|t| usize::from(*t))
        .collect();
    sorted.sort_unstable();
    Timestamp::from(sorted[sorted.len() / 2])
}
/// Best-effort decode of a block's coinbase transaction into the string
/// fields exposed by the v1 block "extras".
///
/// Returns `(raw scriptSig hex, first output address, all output addresses,
/// first output script ASM, scriptSig as lossy ASCII)`. Any read or decode
/// failure degrades to empty values rather than erroring.
///
/// NOTE(review): reads a fixed 1000 bytes from the tx position — a coinbase
/// larger than that fails to decode and falls back to empty strings; confirm
/// the cap is acceptable.
/// NOTE(review): address rendering is hard-coded to mainnet
/// (`Network::Bitcoin`).
fn parse_coinbase_tx(
    reader: &brk_reader::Reader,
    position: brk_types::BlkPosition,
) -> (String, Option<String>, Vec<String>, String, String) {
    let raw_bytes = match reader.read_raw_bytes(position, 1000) {
        Ok(bytes) => bytes,
        Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
    };
    // consensus_decode reads exactly one transaction, so trailing bytes from
    // the oversized read are ignored.
    let tx = match bitcoin::Transaction::consensus_decode(&mut raw_bytes.as_slice()) {
        Ok(tx) => tx,
        Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
    };
    let coinbase_raw = tx
        .input
        .first()
        .map(|input| input.script_sig.as_bytes().to_lower_hex_string())
        .unwrap_or_default();
    let coinbase_signature_ascii = tx
        .input
        .first()
        .map(|input| String::from_utf8_lossy(input.script_sig.as_bytes()).to_string())
        .unwrap_or_default();
    // Outputs that aren't address-shaped (e.g. OP_RETURN commitments) are
    // silently skipped.
    let coinbase_addresses: Vec<String> = tx
        .output
        .iter()
        .filter_map(|output| {
            bitcoin::Address::from_script(&output.script_pubkey, bitcoin::Network::Bitcoin)
                .ok()
                .map(|a| a.to_string())
        })
        .collect();
    let coinbase_address = coinbase_addresses.first().cloned();
    // ASM of the first output's scriptPubKey (despite the "signature" name).
    let coinbase_signature = tx
        .output
        .first()
        .map(|output| output.script_pubkey.to_asm_string())
        .unwrap_or_default();
    (
        coinbase_raw,
        coinbase_address,
        coinbase_addresses,
        coinbase_signature,
        coinbase_signature_ascii,
    )
}
}

View File

@@ -12,7 +12,6 @@ impl Query {
fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> {
let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader();
let max_height = Height::from(indexer.vecs.blocks.blockhash.len().saturating_sub(1));
@@ -20,7 +19,7 @@ impl Query {
return Err(Error::OutOfRange("Block height out of range".into()));
}
let position = computer.positions.block.collect_one(height).unwrap();
let position = indexer.vecs.blocks.position.collect_one(height).unwrap();
let size = indexer.vecs.blocks.total.collect_one(height).unwrap();
reader.read_raw_bytes(position, *size as usize)

View File

@@ -23,7 +23,7 @@ impl Query {
// === Helper methods ===
fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let max_height = self.indexed_height();

View File

@@ -1,5 +1,8 @@
use brk_error::{Error, Result};
use brk_types::{MempoolBlock, MempoolInfo, RecommendedFees, Txid};
use brk_types::{
CpfpEntry, CpfpInfo, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees, Txid,
TxidParam, TxidPrefix, Weight,
};
use crate::Query;
@@ -40,4 +43,67 @@ impl Query {
Ok(blocks)
}
/// Most recently seen mempool transactions, as tracked by the mempool store.
pub fn mempool_recent(&self) -> Result<Vec<MempoolRecentTx>> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let recent = mempool.get_txs().recent().to_vec();
    Ok(recent)
}
/// CPFP (child-pays-for-parent) info for a mempool transaction: its
/// in-mempool ancestors, direct descendants, and effective fee rate.
pub fn cpfp(&self, TxidParam { txid }: TxidParam) -> Result<CpfpInfo> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    // Lookup is by txid prefix — collisions are theoretically possible;
    // presumably the prefix width makes them negligible (TODO confirm).
    let prefix = TxidPrefix::from(&txid);
    let entry = entries
        .get(&prefix)
        .ok_or(Error::NotFound("Transaction not in mempool".into()))?;
    // Ancestors: walk up the depends chain (DFS over in-mempool parents).
    // NOTE(review): diamond-shaped dependency graphs would push a shared
    // ancestor more than once — confirm callers tolerate duplicates.
    let mut ancestors = Vec::new();
    let mut stack: Vec<TxidPrefix> = entry.depends.to_vec();
    while let Some(p) = stack.pop() {
        if let Some(anc) = entries.get(&p) {
            ancestors.push(CpfpEntry {
                txid: anc.txid.clone(),
                // NOTE(review): vsize is wrapped directly as Weight here
                // (weight is normally 4 × vsize) — confirm units.
                weight: Weight::from(anc.vsize),
                fee: anc.fee,
            });
            stack.extend(anc.depends.iter().cloned());
        }
    }
    // Descendants: find entries that depend on this tx's prefix
    // (direct children only; full mempool scan, O(mempool size)).
    let mut descendants = Vec::new();
    for e in entries.entries().iter().flatten() {
        if e.depends.contains(&prefix) {
            descendants.push(CpfpEntry {
                txid: e.txid.clone(),
                weight: Weight::from(e.vsize),
                fee: e.fee,
            });
        }
    }
    let effective_fee_per_vsize = entry.effective_fee_rate();
    Ok(CpfpInfo {
        ancestors,
        descendants,
        effective_fee_per_vsize,
    })
}
/// First-seen timestamps (seconds) for the given txids; a txid not currently
/// in the mempool maps to 0.
pub fn transaction_times(&self, txids: &[Txid]) -> Result<Vec<u64>> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    let mut times = Vec::with_capacity(txids.len());
    for txid in txids {
        let first_seen = match entries.get(&TxidPrefix::from(txid)) {
            Some(entry) => usize::from(entry.first_seen) as u64,
            None => 0,
        };
        times.push(first_seen);
    }
    Ok(times)
}
}

View File

@@ -1,7 +1,7 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
BlockInfoV1, Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo,
PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
@@ -177,4 +177,132 @@ impl Query {
reported_hashrate: None,
})
}
/// Most recent (up to 10) blocks mined by `slug`, scanning backwards from
/// `start_height` (or the chain tip when omitted).
pub fn pool_blocks(
    &self,
    slug: PoolSlug,
    start_height: Option<Height>,
) -> Result<Vec<BlockInfoV1>> {
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let start = start_height.map(|h| h.to_usize()).unwrap_or(max_height);
    // BytesVec reader gives O(1) mmap reads — efficient for backward scan
    let reader = computer.pools.pool.reader();
    let len = reader.len();
    // Guard the empty index: without this, `len.saturating_sub(1)` yields 0
    // and the inclusive 0..=end scan below would still probe height 0 of an
    // empty vector.
    if len == 0 {
        return Ok(Vec::new());
    }
    let end = start.min(len - 1);
    let mut heights = Vec::with_capacity(10);
    for h in (0..=end).rev() {
        if reader.get(h) == slug {
            heights.push(h);
            if heights.len() >= 10 {
                break;
            }
        }
    }
    // Matching heights are non-contiguous, so fetch each block individually;
    // a failed read for one height degrades gracefully instead of failing
    // the whole response.
    let mut blocks = Vec::with_capacity(heights.len());
    for h in heights {
        if let Ok(mut v) = self.blocks_v1_range(h, h + 1) {
            blocks.append(&mut v);
        }
    }
    Ok(blocks)
}
/// Hashrate/share time series for a single pool over its entire history.
pub fn pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
    let pools_list = pools();
    let pool = pools_list.get(slug);
    let samples = self.compute_pool_hashrate_entries(slug, 0)?;
    let mut result = Vec::with_capacity(samples.len());
    for (timestamp, avg_hashrate, share) in samples {
        result.push(PoolHashrateEntry {
            timestamp,
            avg_hashrate,
            share,
            pool_name: pool.name.to_string(),
        });
    }
    Ok(result)
}
/// Hashrate/share samples for every known pool, optionally limited to the
/// trailing `time_period`; zero-share samples are dropped.
pub fn pools_hashrate(
    &self,
    time_period: Option<TimePeriod>,
) -> Result<Vec<PoolHashrateEntry>> {
    let tip = self.height().to_usize();
    // No period means the whole chain history.
    let start = time_period.map_or(0, |tp| tip.saturating_sub(tp.block_count()));
    let pools_list = pools();
    let mut out = Vec::new();
    for pool in pools_list.iter() {
        // A pool with no dominance series simply contributes nothing.
        let Ok(samples) = self.compute_pool_hashrate_entries(pool.slug, start) else {
            continue;
        };
        for (timestamp, avg_hashrate, share) in samples {
            if share > 0.0 {
                out.push(PoolHashrateEntry {
                    timestamp,
                    avg_hashrate,
                    share,
                    pool_name: pool.name.to_string(),
                });
            }
        }
    }
    Ok(out)
}
/// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
/// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
///
/// Samples roughly 200 evenly spaced heights (step = range / 200). The pool's
/// share comes from its dominance series in basis points (bps / 10_000); the
/// absolute hashrate is the network's 1-day hashrate at that block's day
/// index, scaled by the share.
fn compute_pool_hashrate_entries(
    &self,
    slug: PoolSlug,
    start_height: usize,
) -> Result<Vec<(brk_types::Timestamp, u128, f64)>> {
    let computer = self.computer();
    let indexer = self.indexer();
    let end = self.height().to_usize() + 1;
    let start = start_height;
    // The dominance series lives under `major` or `minor` depending on the
    // pool's classification; absent from both means unknown pool.
    let dominance_bps = computer
        .pools
        .major
        .get(&slug)
        .map(|v| &v.base.dominance.bps.height)
        .or_else(|| {
            computer
                .pools
                .minor
                .get(&slug)
                .map(|v| &v.dominance.bps.height)
        })
        .ok_or_else(|| Error::NotFound("Pool not found".into()))?;
    let total = end - start;
    let step = (total / 200).max(1);
    // Batch read everything for the range
    let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(start, end);
    let bps_values = dominance_bps.collect_range_at(start, end);
    let day1_values = computer.indexes.height.day1.collect_range_at(start, end);
    let hashrate_vec = &computer.mining.hashrate.rate.base.day1;
    // Pre-read all needed hashrates by collecting unique day1 values
    let max_day = day1_values.iter().map(|d| d.to_usize()).max().unwrap_or(0);
    let min_day = day1_values.iter().map(|d| d.to_usize()).min().unwrap_or(0);
    let hashrates = hashrate_vec.collect_range_dyn(min_day, max_day + 1);
    Ok((0..total)
        .step_by(step)
        .filter_map(|i| {
            let bps = *bps_values[i];
            // Basis points -> fraction of the total network hashrate.
            let share = bps as f64 / 10000.0;
            let day_idx = day1_values[i].to_usize() - min_day;
            // A missing hashrate sample (None at either level) drops the point.
            let network_hr = f64::from(*hashrates.get(day_idx)?.as_ref()?);
            Some((timestamps[i], (network_hr * share) as u128, share))
        })
        .collect())
}
}

View File

@@ -1,5 +1,6 @@
use brk_error::Result;
use brk_types::Dollars;
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Timestamp};
use vecdb::{ReadableVec, VecIndex};
use crate::Query;
@@ -18,4 +19,40 @@ impl Query {
Ok(oracle.price_dollars())
}
/// Historical BTC/USD prices in a mempool.space-compatible shape.
///
/// With `timestamp`: returns the single price at the block whose timestamp
/// matches (or the insertion point, clamped to the tip). Without: returns
/// roughly 200 samples evenly spaced across all heights.
///
/// NOTE(review): `binary_search_by_key` requires `timestamps` to be sorted,
/// but Bitcoin block timestamps are not strictly monotonic — the lookup is
/// approximate near out-of-order blocks; confirm this is acceptable.
/// NOTE(review): both full-chain columns are `collect()`ed on every call —
/// consider range reads or caching if this endpoint gets hot.
pub fn historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
    let indexer = self.indexer();
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let end = max_height + 1;
    let timestamps = indexer.vecs.blocks.timestamp.collect();
    let all_prices = computer.prices.spot.cents.height.collect();
    let prices = if let Some(target_ts) = timestamp {
        let target = usize::from(target_ts);
        // Ok => exact hit; Err => insertion point, clamped to the tip.
        let h = timestamps
            .binary_search_by_key(&target, |t| usize::from(*t))
            .unwrap_or_else(|i| i.min(max_height));
        vec![HistoricalPriceEntry {
            time: usize::from(timestamps[h]) as u64,
            usd: Dollars::from(all_prices[h]),
        }]
    } else {
        // Downsample to at most ~200 evenly spaced points.
        let step = (max_height / 200).max(1);
        (0..end)
            .step_by(step)
            .map(|h| HistoricalPriceEntry {
                time: usize::from(timestamps[h]) as u64,
                usd: Dollars::from(all_prices[h]),
            })
            .collect()
    };
    Ok(HistoricalPrice {
        prices,
        exchange_rates: ExchangeRates {},
    })
}
}

View File

@@ -3,8 +3,8 @@ use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{
OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxOutspend, TxStatus, Txid,
TxidParam, TxidPrefix, Vin, Vout, Weight,
Height, MerkleProof, OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut,
TxOutspend, TxStatus, Txid, TxidParam, TxidPrefix, Vin, Vout, Weight,
};
use vecdb::{ReadableVec, VecIndex};
@@ -72,6 +72,20 @@ impl Query {
})
}
pub fn transaction_raw(&self, TxidParam { txid }: TxidParam) -> Result<Vec<u8>> {
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_raw_by_index(tx_index)
}
pub fn transaction_hex(&self, TxidParam { txid }: TxidParam) -> Result<String> {
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
@@ -192,7 +206,6 @@ impl Query {
pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
// Get tx metadata using collect_one for PcoVec, read_once for BytesVec
let txid = indexer.vecs.transactions.txid.read_once(tx_index)?;
@@ -226,7 +239,12 @@ impl Query {
.first_txin_index
.collect_one(tx_index)
.unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap();
let position = indexer
.vecs
.transactions
.position
.collect_one(tx_index)
.unwrap();
// Get block info for status
let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?;
@@ -337,22 +355,15 @@ impl Query {
Ok(transaction)
}
fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> {
fn transaction_raw_by_index(&self, tx_index: TxIndex) -> Result<Vec<u8>> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
let total_size = indexer.vecs.transactions.total_size.collect_one(tx_index).unwrap();
let position = indexer.vecs.transactions.position.collect_one(tx_index).unwrap();
self.reader().read_raw_bytes(position, *total_size as usize)
}
let total_size = indexer
.vecs
.transactions
.total_size
.collect_one(tx_index)
.unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap();
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
Ok(buffer.to_lower_hex_string())
/// Hex-encoded raw bytes of an indexed transaction.
fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> {
    let raw = self.transaction_raw_by_index(tx_index)?;
    Ok(raw.to_lower_hex_string())
}
fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
@@ -407,4 +418,93 @@ impl Query {
}),
})
}
fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
let indexer = self.indexer();
let prefix = TxidPrefix::from(txid);
let tx_index: TxIndex = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)?
.map(|cow| cow.into_owned())
.ok_or(Error::UnknownTxid)?;
let height: Height = indexer
.vecs
.transactions
.height
.collect_one(tx_index)
.unwrap();
Ok((tx_index, height))
}
/// Submits a raw transaction (hex) to the backing node for broadcast.
pub fn broadcast_transaction(&self, hex: &str) -> Result<Txid> {
    self.client().send_raw_transaction(hex)
}
/// BIP 37 merkleblock proof (hex) showing the given tx is in its block.
pub fn merkleblock_proof(&self, txid_param: TxidParam) -> Result<String> {
    let target = bitcoin::Txid::from(&txid_param.txid);
    let (_, height) = self.resolve_tx(&txid_param.txid)?;
    let header = self.read_block_header(height)?;
    let block_txids: Vec<bitcoin::Txid> = self
        .block_txids_by_height(height)?
        .iter()
        .map(bitcoin::Txid::from)
        .collect();
    // Partial merkle tree matching only the requested txid.
    let merkle_block = bitcoin::MerkleBlock::from_header_txids_with_predicate(
        &header,
        &block_txids,
        |t| *t == target,
    );
    Ok(bitcoin::consensus::encode::serialize_hex(&merkle_block))
}
/// Electrum-style merkle inclusion proof for a confirmed transaction:
/// block height, the sibling-hash path, and the tx's index in its block.
pub fn merkle_proof(&self, txid_param: TxidParam) -> Result<MerkleProof> {
    let (tx_index, height) = self.resolve_tx(&txid_param.txid)?;
    // The block's first tx index yields this tx's position within the block.
    let first_tx = self
        .indexer()
        .vecs
        .transactions
        .first_tx_index
        .collect_one(height)
        .ok_or(Error::NotFound("Block not found".into()))?;
    let pos = tx_index.to_usize() - first_tx.to_usize();
    let txids = self.block_txids_by_height(height)?;
    Ok(MerkleProof {
        block_height: height,
        merkle: merkle_path(&txids, pos),
        pos,
    })
}
}
/// Builds the merkle branch (audit path) for the tx at index `pos`, as hex
/// strings in display (reversed) byte order — the shape Electrum/esplora
/// clients expect.
fn merkle_path(txids: &[Txid], pos: usize) -> Vec<String> {
    use bitcoin::hashes::{Hash, sha256d};
    // Txid bytes are in internal order (same layout as bitcoin::Txid)
    let mut hashes: Vec<[u8; 32]> = txids
        .iter()
        .map(|t| bitcoin::Txid::from(t).to_byte_array())
        .collect();
    let mut proof = Vec::new();
    let mut idx = pos;
    while hashes.len() > 1 {
        // Sibling is idx^1; when idx is the unpaired last node of an
        // odd-length level, Bitcoin pairs it with itself, so use idx.
        let sibling = if idx ^ 1 < hashes.len() { idx ^ 1 } else { idx };
        // Display order: reverse bytes for hex output
        let mut display = hashes[sibling];
        display.reverse();
        proof.push(bitcoin::hex::DisplayHex::to_lower_hex_string(&display));
        // Collapse the level: double-SHA256 each pair; `pair.last()` repeats
        // the lone element of an odd trailing chunk (duplicate-last rule).
        hashes = hashes
            .chunks(2)
            .map(|pair| {
                let right = pair.last().unwrap();
                let mut combined = [0u8; 64];
                combined[..32].copy_from_slice(&pair[0]);
                combined[32..].copy_from_slice(right);
                sha256d::Hash::hash(&combined).to_byte_array()
            })
            .collect();
        // Move to the parent's index for the next level.
        idx /= 2;
    }
    proof
}