server: snapshot

This commit is contained in:
nym21
2025-12-15 16:32:45 +01:00
parent 882a3525af
commit 825a4a77c0
100 changed files with 2677 additions and 3438 deletions

View File

@@ -0,0 +1,235 @@
use std::str::FromStr;
use bitcoin::{Network, PublicKey, ScriptBuf};
use brk_error::{Error, Result};
use brk_types::{
Address, AddressBytes, AddressChainStats, AddressHash, AddressIndexOutPoint,
AddressIndexTxIndex, AddressStats, AnyAddressDataIndexEnum, OutputType, Sats, TxIndex,
TxStatus, Txid, TypeIndex, Unit, Utxo, Vout,
};
use vecdb::TypedVecIterator;
use crate::Query;
/// Maximum number of mempool txids returned per address by `address_mempool_txids`.
const MAX_MEMPOOL_TXIDS: usize = 50;
impl Query {
    /// Resolve an address (or raw public-key) string into confirmed chain
    /// statistics, plus mempool statistics when a mempool handle is attached.
    ///
    /// Accepts either an address string (validated for mainnet) or, as a
    /// fallback, a hex public key which is treated as a P2PK script.
    ///
    /// # Errors
    /// - `Error::InvalidNetwork` if the address parses but is not valid for mainnet.
    /// - `Error::InvalidAddress` if the input is neither an address nor a public key.
    /// - `Error::UnknownAddress` if the address has never been indexed on-chain.
    pub fn address(&self, Address { address }: Address) -> Result<AddressStats> {
        let indexer = self.indexer();
        let computer = self.computer();
        let stores = &indexer.stores;
        // Try parsing as an address first; fall back to a raw public key
        // (P2PK outputs have no address encoding).
        let script = if let Ok(address) = bitcoin::Address::from_str(&address) {
            if !address.is_valid_for_network(Network::Bitcoin) {
                return Err(Error::InvalidNetwork);
            }
            let address = address.assume_checked();
            address.script_pubkey()
        } else if let Ok(pubkey) = PublicKey::from_str(&address) {
            ScriptBuf::new_p2pk(&pubkey)
        } else {
            return Err(Error::InvalidAddress);
        };
        let outputtype = OutputType::from(&script);
        let Ok(bytes) = AddressBytes::try_from((&script, outputtype)) else {
            return Err(Error::Str("Failed to convert the address to bytes"));
        };
        let addresstype = outputtype;
        let hash = AddressHash::from(&bytes);
        // NOTE(review): store read errors are collapsed into `UnknownAddress`
        // here — an I/O failure becomes indistinguishable from a missing
        // address. Confirm this is intentional.
        let Ok(Some(type_index)) = stores
            .addresstype_to_addresshash_to_addressindex
            .get(addresstype)
            .unwrap()
            .get(&hash)
            .map(|opt| opt.map(|cow| cow.into_owned()))
        else {
            return Err(Error::UnknownAddress);
        };
        let any_address_index = computer
            .stateful
            .any_address_indexes
            .get_anyaddressindex_once(outputtype, type_index)?;
        // Address data lives in one of two backing stores depending on whether
        // the address currently holds funds ("loaded") or not ("empty").
        let address_data = match any_address_index.to_enum() {
            AnyAddressDataIndexEnum::Loaded(index) => computer
                .stateful
                .addresses_data
                .loaded
                .iter()?
                .get_unwrap(index),
            AnyAddressDataIndexEnum::Empty(index) => computer
                .stateful
                .addresses_data
                .empty
                .iter()?
                .get_unwrap(index)
                .into(),
        };
        Ok(AddressStats {
            address: address.into(),
            chain_stats: AddressChainStats {
                type_index,
                funded_txo_count: address_data.funded_txo_count,
                funded_txo_sum: address_data.received,
                spent_txo_count: address_data.spent_txo_count,
                spent_txo_sum: address_data.sent,
                tx_count: address_data.tx_count,
            },
            // `None` when no mempool is attached; `Some(default)` when the
            // mempool exists but has no entry for this address.
            mempool_stats: self.mempool().map(|mempool| {
                mempool
                    .get_addresses()
                    .get(&bytes)
                    .map(|(stats, _)| stats)
                    .cloned()
                    .unwrap_or_default()
            }),
        })
    }
    /// Up to `limit` confirmed txids touching `address`, newest first,
    /// optionally resuming after `after_txid` for pagination.
    ///
    /// # Errors
    /// Fails when the address cannot be resolved or `after_txid` is unknown.
    pub fn address_txids(
        &self,
        address: Address,
        after_txid: Option<Txid>,
        limit: usize,
    ) -> Result<Vec<Txid>> {
        let indexer = self.indexer();
        let stores = &indexer.stores;
        let (outputtype, type_index) = self.resolve_address(&address)?;
        let store = stores
            .addresstype_to_addressindex_and_txindex
            .get(outputtype)
            .unwrap();
        // Keys are prefixed by the big-endian address index, so a prefix scan
        // yields exactly this address's entries.
        let prefix = u32::from(type_index).to_be_bytes();
        let after_txindex = if let Some(after_txid) = after_txid {
            let txindex = stores
                .txidprefix_to_txindex
                .get(&after_txid.into())
                .map_err(|_| Error::Str("Failed to look up after_txid"))?
                .ok_or(Error::Str("after_txid not found"))?
                .into_owned();
            Some(txindex)
        } else {
            None
        };
        // Reverse iteration => descending tx index => newest first; the filter
        // drops everything at or after the pagination cursor.
        let txindices: Vec<TxIndex> = store
            .prefix(prefix)
            .rev()
            .filter(|(key, _): &(AddressIndexTxIndex, Unit)| {
                if let Some(after) = after_txindex {
                    key.txindex() < after
                } else {
                    true
                }
            })
            .take(limit)
            .map(|(key, _)| key.txindex())
            .collect();
        let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
        let txids: Vec<Txid> = txindices
            .into_iter()
            .map(|txindex| txindex_to_txid_iter.get_unwrap(txindex))
            .collect();
        Ok(txids)
    }
    /// All confirmed unspent outputs of `address`, with block metadata.
    pub fn address_utxos(&self, address: Address) -> Result<Vec<Utxo>> {
        let indexer = self.indexer();
        let stores = &indexer.stores;
        let vecs = &indexer.vecs;
        let (outputtype, type_index) = self.resolve_address(&address)?;
        let store = stores
            .addresstype_to_addressindex_and_unspentoutpoint
            .get(outputtype)
            .unwrap();
        let prefix = u32::from(type_index).to_be_bytes();
        let outpoints: Vec<(TxIndex, Vout)> = store
            .prefix(prefix)
            .map(|(key, _): (AddressIndexOutPoint, Unit)| (key.txindex(), key.vout()))
            .collect();
        // One sequential iterator per backing vec, reused across all outpoints.
        let mut txindex_to_txid_iter = vecs.tx.txindex_to_txid.iter()?;
        let mut txindex_to_height_iter = vecs.tx.txindex_to_height.iter()?;
        let mut txindex_to_first_txoutindex_iter = vecs.tx.txindex_to_first_txoutindex.iter()?;
        let mut txoutindex_to_value_iter = vecs.txout.txoutindex_to_value.iter()?;
        let mut height_to_blockhash_iter = vecs.block.height_to_blockhash.iter()?;
        let mut height_to_timestamp_iter = vecs.block.height_to_timestamp.iter()?;
        let utxos: Vec<Utxo> = outpoints
            .into_iter()
            .map(|(txindex, vout)| {
                let txid: Txid = txindex_to_txid_iter.get_unwrap(txindex);
                let height = txindex_to_height_iter.get_unwrap(txindex);
                // The output's global index is the tx's first output index
                // offset by its vout.
                let first_txoutindex = txindex_to_first_txoutindex_iter.get_unwrap(txindex);
                let txoutindex = first_txoutindex + vout;
                let value: Sats = txoutindex_to_value_iter.get_unwrap(txoutindex);
                let block_hash = height_to_blockhash_iter.get_unwrap(height);
                let block_time = height_to_timestamp_iter.get_unwrap(height);
                Utxo {
                    txid,
                    vout,
                    // These outpoints come from the confirmed-UTXO store, so
                    // status is always confirmed.
                    status: TxStatus {
                        confirmed: true,
                        block_height: Some(height),
                        block_hash: Some(block_hash),
                        block_time: Some(block_time),
                    },
                    value,
                }
            })
            .collect();
        Ok(utxos)
    }
    /// Up to `MAX_MEMPOOL_TXIDS` unconfirmed txids touching `address`.
    ///
    /// # Errors
    /// Fails when no mempool is attached or the address string is invalid.
    pub fn address_mempool_txids(&self, address: Address) -> Result<Vec<Txid>> {
        let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
        let bytes = AddressBytes::from_str(&address.address)?;
        let addresses = mempool.get_addresses();
        let txids: Vec<Txid> = addresses
            .get(&bytes)
            .map(|(_, txids)| txids.iter().take(MAX_MEMPOOL_TXIDS).cloned().collect())
            .unwrap_or_default();
        Ok(txids)
    }
    /// Resolve an address string to its output type and type_index
    ///
    /// NOTE(review): unlike `address`, this parses via `AddressBytes::from_str`
    /// and has no raw-public-key (P2PK) fallback — confirm whether
    /// `address_txids`/`address_utxos` should also accept public keys.
    fn resolve_address(&self, address: &Address) -> Result<(OutputType, TypeIndex)> {
        let stores = &self.indexer().stores;
        let bytes = AddressBytes::from_str(&address.address)?;
        let outputtype = OutputType::from(&bytes);
        let hash = AddressHash::from(&bytes);
        // NOTE(review): as in `address`, store errors collapse into
        // `UnknownAddress`.
        let Ok(Some(type_index)) = stores
            .addresstype_to_addresshash_to_addressindex
            .get(outputtype)
            .unwrap()
            .get(&hash)
            .map(|opt| opt.map(|cow| cow.into_owned()))
        else {
            return Err(Error::UnknownAddress);
        };
        Ok((outputtype, type_index))
    }
}

View File

@@ -0,0 +1,103 @@
use brk_error::{Error, Result};
use brk_types::{BlockHash, BlockHashPrefix, BlockInfo, Height, TxIndex};
use vecdb::{AnyVec, GenericStoredVec, VecIndex};
use crate::Query;
const DEFAULT_BLOCK_COUNT: u32 = 10;
impl Query {
    /// Summary info for the block with the given hash string.
    pub fn block(&self, hash: &str) -> Result<BlockInfo> {
        let height = self.height_by_hash(hash)?;
        self.block_by_height(height)
    }
    /// Summary info (hash, tx count, size, weight, timestamp, difficulty)
    /// for the block at `height`.
    ///
    /// # Errors
    /// Returns `Error::Str` when `height` is past the last indexed block.
    pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> {
        let indexer = self.indexer();
        let max_height = self.max_height();
        if height > max_height {
            return Err(Error::Str("Block height out of range"));
        }
        let blockhash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
        let difficulty = indexer.vecs.block.height_to_difficulty.read_once(height)?;
        let timestamp = indexer.vecs.block.height_to_timestamp.read_once(height)?;
        let size = indexer.vecs.block.height_to_total_size.read_once(height)?;
        let weight = indexer.vecs.block.height_to_weight.read_once(height)?;
        let tx_count = self.tx_count_at_height(height, max_height)?;
        Ok(BlockInfo {
            id: blockhash,
            height,
            tx_count,
            size: *size,
            weight,
            timestamp,
            difficulty: *difficulty,
        })
    }
    /// Up to `DEFAULT_BLOCK_COUNT` blocks, newest first, starting at
    /// `start_height` (defaults to the current tip).
    pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
        // NOTE(review): this uses `self.height()` while `block_by_height`
        // validates against `self.max_height()` — confirm the two always
        // agree, otherwise the lookups below can fail near the tip.
        let max_height = self.height();
        let start = start_height.unwrap_or(max_height);
        let start = start.min(max_height);
        let start_u32: u32 = start.into();
        // Clamp the count so we never walk below height 0.
        let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1);
        let mut blocks = Vec::with_capacity(count as usize);
        for i in 0..count {
            let height = Height::from(start_u32 - i);
            blocks.push(self.block_by_height(height)?);
        }
        Ok(blocks)
    }
    // === Helper methods ===
    /// Resolve a block-hash string to its height via the hash-prefix store.
    ///
    /// # Errors
    /// `Error::Str` when the hash does not parse or is not indexed.
    pub fn height_by_hash(&self, hash: &str) -> Result<Height> {
        let indexer = self.indexer();
        let blockhash: BlockHash = hash.parse().map_err(|_| Error::Str("Invalid block hash"))?;
        let prefix = BlockHashPrefix::from(&blockhash);
        indexer
            .stores
            .blockhashprefix_to_height
            .get(&prefix)?
            .map(|h| *h)
            .ok_or(Error::Str("Block not found"))
    }
    /// Height of the last indexed block (blockhash vec length minus one).
    fn max_height(&self) -> Height {
        Height::from(
            self.indexer()
                .vecs
                .block
                .height_to_blockhash
                .len()
                .saturating_sub(1),
        )
    }
    /// Number of transactions in the block at `height`, derived from the
    /// difference between this block's first tx index and the next block's.
    fn tx_count_at_height(&self, height: Height, max_height: Height) -> Result<u32> {
        let indexer = self.indexer();
        let computer = self.computer();
        let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
        let next_first_txindex = if height < max_height {
            indexer
                .vecs
                .tx
                .height_to_first_txindex
                .read_once(height.incremented())?
        } else {
            // Tip block: the upper bound is the total number of indexed txs.
            TxIndex::from(computer.indexes.txindex_to_txindex.len())
        };
        Ok((next_first_txindex.to_usize() - first_txindex.to_usize()) as u32)
    }
}

View File

@@ -0,0 +1,7 @@
mod info;
mod raw;
mod status;
mod timestamp;
mod txs;
pub const BLOCK_TXS_PAGE_SIZE: usize = 25;

View File

@@ -0,0 +1,35 @@
use brk_error::{Error, Result};
use brk_types::Height;
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
impl Query {
    /// Raw serialized bytes of the block with the given hash.
    pub fn block_raw(&self, hash: &str) -> Result<Vec<u8>> {
        self.block_raw_by_height(self.height_by_hash(hash)?)
    }
    /// Raw serialized bytes of the block at `height`, read straight from the
    /// on-disk blk files via the stored position and total size.
    fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> {
        let indexer = self.indexer();
        let computer = self.computer();
        let reader = self.reader();
        // Last indexed height: blockhash vec length minus one.
        let tip = Height::from(
            indexer
                .vecs
                .block
                .height_to_blockhash
                .len()
                .saturating_sub(1),
        );
        if height > tip {
            return Err(Error::Str("Block height out of range"));
        }
        let position = computer.blks.height_to_position.read_once(height)?;
        let total_size = indexer.vecs.block.height_to_total_size.read_once(height)?;
        reader.read_raw_bytes(position, *total_size as usize)
    }
}

View File

@@ -0,0 +1,43 @@
use brk_error::Result;
use brk_types::{BlockStatus, Height};
use vecdb::{AnyVec, GenericStoredVec};
use crate::Query;
impl Query {
    /// Chain-membership status of the block with the given hash.
    pub fn block_status(&self, hash: &str) -> Result<BlockStatus> {
        self.block_status_by_height(self.height_by_hash(hash)?)
    }
    /// Status for the block at `height`, including the hash of the following
    /// block when one exists.
    fn block_status_by_height(&self, height: Height) -> Result<BlockStatus> {
        let indexer = self.indexer();
        let hashes = &indexer.vecs.block.height_to_blockhash;
        // Last indexed height.
        let tip = Height::from(hashes.len().saturating_sub(1));
        if height > tip {
            return Ok(BlockStatus::not_in_best_chain());
        }
        // The tip block has no successor yet.
        let next_best = if height < tip {
            Some(hashes.read_once(height.incremented())?)
        } else {
            None
        };
        Ok(BlockStatus::in_best_chain(height, next_best))
    }
}

View File

@@ -0,0 +1,81 @@
use brk_error::{Error, Result};
use brk_types::{BlockTimestamp, Date, DateIndex, Height, Timestamp};
use jiff::Timestamp as JiffTimestamp;
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
impl Query {
    /// Find the last block whose timestamp is at or before `timestamp`.
    ///
    /// Narrows the search using the date index (first block of the target
    /// day), then scans block timestamps sequentially.
    pub fn block_by_timestamp(&self, timestamp: Timestamp) -> Result<BlockTimestamp> {
        let indexer = self.indexer();
        let computer = self.computer();
        let max_height = self.height();
        let max_height_usize: usize = max_height.into();
        if max_height_usize == 0 {
            return Err(Error::Str("No blocks indexed"));
        }
        let target = timestamp;
        let date = Date::from(target);
        // NOTE(review): an unconvertible date silently falls back to the
        // default dateindex — confirm this is the intended behavior.
        let dateindex = DateIndex::try_from(date).unwrap_or_default();
        // Get first height of the target date
        let first_height_of_day = computer
            .indexes
            .dateindex_to_first_height
            .read_once(dateindex)
            .unwrap_or(Height::from(0usize));
        let start: usize = usize::from(first_height_of_day).min(max_height_usize);
        // Use iterator for efficient sequential access
        let mut timestamp_iter = indexer.vecs.block.height_to_timestamp.iter()?;
        // Search forward from start to find the last block <= target timestamp
        let mut best_height = start;
        let mut best_ts = timestamp_iter.get_unwrap(Height::from(start));
        // NOTE(review): the early `break` assumes block timestamps are
        // monotonic, which Bitcoin does not guarantee — a locally
        // out-of-order timestamp can end the scan one block early.
        for h in (start + 1)..=max_height_usize {
            let height = Height::from(h);
            let block_ts = timestamp_iter.get_unwrap(height);
            if block_ts <= target {
                best_height = h;
                best_ts = block_ts;
            } else {
                break;
            }
        }
        // Check one block before start in case we need to go backward
        if start > 0 && best_ts > target {
            let prev_height = Height::from(start - 1);
            let prev_ts = timestamp_iter.get_unwrap(prev_height);
            if prev_ts <= target {
                best_height = start - 1;
                best_ts = prev_ts;
            }
        }
        let height = Height::from(best_height);
        let blockhash = indexer
            .vecs
            .block
            .height_to_blockhash
            .iter()?
            .get_unwrap(height);
        // Convert timestamp to ISO 8601 format
        let ts_secs: i64 = (*best_ts).into();
        // Fall back to the raw seconds string if jiff rejects the value.
        let iso_timestamp = JiffTimestamp::from_second(ts_secs)
            .map(|t| t.to_string())
            .unwrap_or_else(|_| best_ts.to_string());
        Ok(BlockTimestamp {
            height,
            hash: blockhash,
            timestamp: iso_timestamp,
        })
    }
}

View File

@@ -0,0 +1,128 @@
use brk_error::{Error, Result};
use brk_types::{Height, Transaction, TxIndex, Txid};
use vecdb::{AnyVec, GenericStoredVec, TypedVecIterator};
use super::BLOCK_TXS_PAGE_SIZE;
use crate::Query;
impl Query {
pub fn block_txids(&self, hash: &str) -> Result<Vec<Txid>> {
let height = self.height_by_hash(hash)?;
self.block_txids_by_height(height)
}
pub fn block_txs(&self, hash: &str, start_index: usize) -> Result<Vec<Transaction>> {
let height = self.height_by_hash(hash)?;
self.block_txs_by_height(height, start_index)
}
pub fn block_txid_at_index(&self, hash: &str, index: usize) -> Result<Txid> {
let height = self.height_by_hash(hash)?;
self.block_txid_at_index_by_height(height, index)
}
// === Helper methods ===
fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let count = next - first;
let txids: Vec<Txid> = indexer
.vecs
.tx
.txindex_to_txid
.iter()?
.skip(first)
.take(count)
.collect();
Ok(txids)
}
fn block_txs_by_height(
&self,
height: Height,
start_index: usize,
) -> Result<Vec<Transaction>> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let tx_count = next - first;
if start_index >= tx_count {
return Ok(Vec::new());
}
let end_index = (start_index + BLOCK_TXS_PAGE_SIZE).min(tx_count);
let count = end_index - start_index;
let mut txs = Vec::with_capacity(count);
for i in start_index..end_index {
let txindex = TxIndex::from(first + i);
let tx = self.transaction_by_index(txindex)?;
txs.push(tx);
}
Ok(txs)
}
fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
let indexer = self.indexer();
let max_height = self.height();
if height > max_height {
return Err(Error::Str("Block height out of range"));
}
let first_txindex = indexer.vecs.tx.height_to_first_txindex.read_once(height)?;
let next_first_txindex = indexer
.vecs
.tx
.height_to_first_txindex
.read_once(height.incremented())
.unwrap_or_else(|_| TxIndex::from(indexer.vecs.tx.txindex_to_txid.len()));
let first: usize = first_txindex.into();
let next: usize = next_first_txindex.into();
let tx_count = next - first;
if index >= tx_count {
return Err(Error::Str("Transaction index out of range"));
}
let txindex = TxIndex::from(first + index);
let txid = indexer.vecs.tx.txindex_to_txid.iter()?.get_unwrap(txindex);
Ok(txid)
}
}

View File

@@ -0,0 +1,38 @@
use brk_error::{Error, Result};
use brk_types::{MempoolBlock, MempoolInfo, RecommendedFees, Txid};
use crate::Query;
impl Query {
pub fn mempool_info(&self) -> Result<MempoolInfo> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
Ok(mempool.get_info())
}
pub fn mempool_txids(&self) -> Result<Vec<Txid>> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
let txs = mempool.get_txs();
Ok(txs.keys().cloned().collect())
}
pub fn recommended_fees(&self) -> Result<RecommendedFees> {
self.mempool()
.map(|mempool| mempool.get_fees())
.ok_or(Error::MempoolNotAvailable)
}
pub fn mempool_blocks(&self) -> Result<Vec<MempoolBlock>> {
let mempool = self.mempool().ok_or(Error::Str("Mempool not available"))?;
let block_stats = mempool.get_block_stats();
let blocks = block_stats
.into_iter()
.map(|stats| {
MempoolBlock::new(stats.tx_count, stats.total_vsize, stats.total_fee, stats.fee_range)
})
.collect();
Ok(blocks)
}
}

View File

@@ -0,0 +1,43 @@
use brk_error::Result;
use brk_types::{BlockFeeRatesEntry, FeeRatePercentiles, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Fee-rate percentile entries sampled per day across `time_period`.
    pub fn block_fee_rates(&self, time_period: TimePeriod) -> Result<Vec<BlockFeeRatesEntry>> {
        let computer = self.computer();
        let tip = self.height().to_usize();
        let start = tip.saturating_sub(time_period.block_count());
        let iter = DateIndexIter::new(computer, start, tip);
        // One sequential iterator per percentile series.
        let vecs = &computer.chain.indexes_to_fee_rate.dateindex;
        let mut p_min = vecs.unwrap_min().iter();
        let mut p10 = vecs.unwrap_pct10().iter();
        let mut p25 = vecs.unwrap_pct25().iter();
        let mut p50 = vecs.unwrap_median().iter();
        let mut p75 = vecs.unwrap_pct75().iter();
        let mut p90 = vecs.unwrap_pct90().iter();
        let mut p_max = vecs.unwrap_max().iter();
        Ok(iter.collect(|di, timestamp, avg_height| {
            // Missing days default each percentile to zero-ish defaults.
            let percentiles = FeeRatePercentiles::new(
                p_min.get(di).unwrap_or_default(),
                p10.get(di).unwrap_or_default(),
                p25.get(di).unwrap_or_default(),
                p50.get(di).unwrap_or_default(),
                p75.get(di).unwrap_or_default(),
                p90.get(di).unwrap_or_default(),
                p_max.get(di).unwrap_or_default(),
            );
            Some(BlockFeeRatesEntry {
                avg_height,
                timestamp,
                percentiles,
            })
        }))
    }
}

View File

@@ -0,0 +1,34 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average block fees sampled per day across `time_period`.
    pub fn block_fees(&self, time_period: TimePeriod) -> Result<Vec<BlockFeesEntry>> {
        let computer = self.computer();
        let tip = self.height().to_usize();
        let start = tip.saturating_sub(time_period.block_count());
        let mut avg_fee_iter = computer
            .chain
            .indexes_to_fee
            .sats
            .dateindex
            .unwrap_average()
            .iter();
        // Days with no fee datum are skipped by the `map` returning `None`.
        Ok(DateIndexIter::new(computer, start, tip).collect(|di, timestamp, avg_height| {
            avg_fee_iter.get(di).map(|avg_fees| BlockFeesEntry {
                avg_height,
                timestamp,
                avg_fees,
            })
        }))
    }
}

View File

@@ -0,0 +1,35 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average block rewards sampled per day across `time_period`.
    /// Rewards here are the coinbase amount, i.e. subsidy plus fees.
    pub fn block_rewards(&self, time_period: TimePeriod) -> Result<Vec<BlockRewardsEntry>> {
        let computer = self.computer();
        let tip = self.height().to_usize();
        let start = tip.saturating_sub(time_period.block_count());
        let mut avg_reward_iter = computer
            .chain
            .indexes_to_coinbase
            .sats
            .dateindex
            .unwrap_average()
            .iter();
        // Days with no coinbase datum are skipped via `None`.
        Ok(DateIndexIter::new(computer, start, tip).collect(|di, ts, h| {
            avg_reward_iter.get(di).map(|reward| BlockRewardsEntry {
                avg_height: h.into(),
                timestamp: *ts,
                avg_rewards: *reward,
            })
        }))
    }
}

View File

@@ -0,0 +1,61 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{IterableVec, VecIndex};
use super::dateindex_iter::DateIndexIter;
use crate::Query;
impl Query {
    /// Average block sizes and weights sampled per day across `time_period`.
    pub fn block_sizes_weights(&self, time_period: TimePeriod) -> Result<BlockSizesWeights> {
        let computer = self.computer();
        let tip = self.height().to_usize();
        let start = tip.saturating_sub(time_period.block_count());
        let iter = DateIndexIter::new(computer, start, tip);
        let mut size_iter = computer
            .chain
            .indexes_to_block_size
            .dateindex
            .unwrap_average()
            .iter();
        let mut weight_iter = computer
            .chain
            .indexes_to_block_weight
            .dateindex
            .unwrap_average()
            .iter();
        // Sample both series in one pass, then split into the two entry lists.
        let samples: Vec<_> = iter.collect(|di, ts, h| {
            Some((
                h.into(),
                (*ts),
                size_iter.get(di).map(|s| *s),
                weight_iter.get(di).map(|w| *w),
            ))
        });
        let mut sizes = Vec::with_capacity(samples.len());
        let mut weights = Vec::with_capacity(samples.len());
        for (avg_height, timestamp, size, weight) in samples {
            if let Some(avg_size) = size {
                sizes.push(BlockSizeEntry {
                    avg_height,
                    timestamp,
                    avg_size,
                });
            }
            if let Some(avg_weight) = weight {
                weights.push(BlockWeightEntry {
                    avg_height,
                    timestamp,
                    avg_weight,
                });
            }
        }
        Ok(BlockSizesWeights { sizes, weights })
    }
}

View File

@@ -0,0 +1,71 @@
use brk_computer::Computer;
use brk_types::{DateIndex, Height, Timestamp};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
/// Helper for iterating over dateindex ranges with sampling.
pub struct DateIndexIter<'a> {
computer: &'a Computer,
start_di: DateIndex,
end_di: DateIndex,
step: usize,
}
impl<'a> DateIndexIter<'a> {
    /// Build an iteration plan from a block-height range by mapping both
    /// endpoints to date indexes and choosing a sampling stride.
    pub fn new(computer: &'a Computer, start_height: usize, end_height: usize) -> Self {
        let start_di = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(start_height))
            .unwrap_or_default();
        let end_di = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(end_height))
            .unwrap_or_default();
        // Inclusive day count; stride keeps output at roughly <= 200 samples.
        let total = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
        let step = (total / 200).max(1);
        Self {
            computer,
            start_di,
            end_di,
            step,
        }
    }
    /// Iterate and collect entries using the provided transform function.
    ///
    /// For every sampled dateindex, the day's first timestamp and first
    /// height are looked up; days missing either value, or for which
    /// `transform` returns `None`, are skipped.
    pub fn collect<T, F>(&self, mut transform: F) -> Vec<T>
    where
        F: FnMut(DateIndex, Timestamp, Height) -> Option<T>,
    {
        let total = self
            .end_di
            .to_usize()
            .saturating_sub(self.start_di.to_usize())
            + 1;
        let mut timestamps = self
            .computer
            .chain
            .timeindexes_to_timestamp
            .dateindex_extra
            .unwrap_first()
            .iter();
        let mut heights = self.computer.indexes.dateindex_to_first_height.iter();
        let mut entries = Vec::with_capacity(total / self.step + 1);
        let mut i = self.start_di.to_usize();
        while i <= self.end_di.to_usize() {
            let di = DateIndex::from(i);
            if let (Some(ts), Some(h)) = (timestamps.get(di), heights.get(di))
                && let Some(entry) = transform(di, ts, h)
            {
                entries.push(entry);
            }
            i += self.step;
        }
        entries
    }
}

View File

@@ -0,0 +1,121 @@
use std::time::{SystemTime, UNIX_EPOCH};
use brk_error::Result;
use brk_types::{DifficultyAdjustment, DifficultyEpoch, Height};
use vecdb::GenericStoredVec;
use crate::Query;
/// Blocks per difficulty epoch (2 weeks target)
const BLOCKS_PER_EPOCH: u32 = 2016;
/// Target block time in seconds (10 minutes)
const TARGET_BLOCK_TIME: u64 = 600;
impl Query {
    /// Progress and estimates for the current difficulty epoch: completion
    /// percentage, estimated retarget date, estimated difficulty change, and
    /// the previous retarget's percentage change.
    pub fn difficulty_adjustment(&self) -> Result<DifficultyAdjustment> {
        let indexer = self.indexer();
        let computer = self.computer();
        let current_height = self.height();
        let current_height_u32: u32 = current_height.into();
        // Get current epoch
        let current_epoch = computer
            .indexes
            .height_to_difficultyepoch
            .read_once(current_height)?;
        let current_epoch_usize: usize = current_epoch.into();
        // Get epoch start height
        let epoch_start_height = computer
            .indexes
            .difficultyepoch_to_first_height
            .read_once(current_epoch)?;
        let epoch_start_u32: u32 = epoch_start_height.into();
        // Calculate epoch progress
        let next_retarget_height = epoch_start_u32 + BLOCKS_PER_EPOCH;
        let blocks_into_epoch = current_height_u32 - epoch_start_u32;
        let remaining_blocks = next_retarget_height - current_height_u32;
        let progress_percent = (blocks_into_epoch as f64 / BLOCKS_PER_EPOCH as f64) * 100.0;
        // Get timestamps using difficultyepoch_to_timestamp for epoch start
        let epoch_start_timestamp = computer
            .chain
            .difficultyepoch_to_timestamp
            .read_once(current_epoch)?;
        let current_timestamp = indexer
            .vecs
            .block
            .height_to_timestamp
            .read_once(current_height)?;
        // Calculate average block time in current epoch
        // NOTE(review): block timestamps are not monotonic — if the tip's
        // timestamp is earlier than the epoch-start timestamp this
        // subtraction wraps; confirm upstream guarantees or clamp to zero.
        let elapsed_time = (*current_timestamp - *epoch_start_timestamp) as u64;
        let time_avg = if blocks_into_epoch > 0 {
            elapsed_time / blocks_into_epoch as u64
        } else {
            // No elapsed blocks yet: assume the 10-minute target.
            TARGET_BLOCK_TIME
        };
        // Estimate remaining time and retarget date
        let remaining_time = remaining_blocks as u64 * time_avg;
        // Wall-clock "now"; falls back to the tip timestamp if the system
        // clock reads before the Unix epoch.
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(*current_timestamp as u64);
        let estimated_retarget_date = now + remaining_time;
        // Calculate expected vs actual time for difficulty change estimate
        // (blocks faster than schedule => positive projected change).
        let expected_time = blocks_into_epoch as u64 * TARGET_BLOCK_TIME;
        let difficulty_change = if elapsed_time > 0 && blocks_into_epoch > 0 {
            ((expected_time as f64 / elapsed_time as f64) - 1.0) * 100.0
        } else {
            0.0
        };
        // Time offset from expected schedule
        let time_offset = expected_time as i64 - elapsed_time as i64;
        // Calculate previous retarget using stored difficulty values
        let previous_retarget = if current_epoch_usize > 0 {
            let prev_epoch = DifficultyEpoch::from(current_epoch_usize - 1);
            let prev_epoch_start = computer
                .indexes
                .difficultyepoch_to_first_height
                .read_once(prev_epoch)?;
            let prev_difficulty = indexer
                .vecs
                .block
                .height_to_difficulty
                .read_once(prev_epoch_start)?;
            let curr_difficulty = indexer
                .vecs
                .block
                .height_to_difficulty
                .read_once(epoch_start_height)?;
            if *prev_difficulty > 0.0 {
                ((*curr_difficulty / *prev_difficulty) - 1.0) * 100.0
            } else {
                0.0
            }
        } else {
            // Genesis epoch has no predecessor.
            0.0
        };
        Ok(DifficultyAdjustment {
            progress_percent,
            difficulty_change,
            estimated_retarget_date,
            remaining_blocks,
            remaining_time,
            previous_retarget,
            next_retarget_height: Height::from(next_retarget_height),
            time_avg,
            adjusted_time_avg: time_avg,
            time_offset,
        })
    }
}

View File

@@ -0,0 +1,26 @@
use brk_error::Result;
use brk_types::{DifficultyAdjustmentEntry, TimePeriod};
use vecdb::VecIndex;
use super::epochs::iter_difficulty_epochs;
use crate::Query;
impl Query {
    /// Difficulty adjustment entries, newest first, optionally limited to the
    /// blocks covered by `time_period` (all history when `None`).
    pub fn difficulty_adjustments(
        &self,
        time_period: Option<TimePeriod>,
    ) -> Result<Vec<DifficultyAdjustmentEntry>> {
        let end = self.height().to_usize();
        let start = time_period.map_or(0, |tp| end.saturating_sub(tp.block_count()));
        let mut entries = iter_difficulty_epochs(self.computer(), start, end);
        // Epochs come back oldest-first; callers expect newest first.
        entries.reverse();
        Ok(entries)
    }
}

View File

@@ -0,0 +1,63 @@
use brk_computer::Computer;
use brk_types::{DifficultyAdjustmentEntry, DifficultyEpoch, Height};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
/// Iterate over difficulty epochs within a height range.
pub fn iter_difficulty_epochs(
computer: &Computer,
start_height: usize,
end_height: usize,
) -> Vec<DifficultyAdjustmentEntry> {
let start_epoch = computer
.indexes
.height_to_difficultyepoch
.read_once(Height::from(start_height))
.unwrap_or_default();
let end_epoch = computer
.indexes
.height_to_difficultyepoch
.read_once(Height::from(end_height))
.unwrap_or_default();
let mut epoch_to_height_iter = computer.indexes.difficultyepoch_to_first_height.iter();
let mut epoch_to_timestamp_iter = computer.chain.difficultyepoch_to_timestamp.iter();
let mut epoch_to_difficulty_iter = computer
.chain
.indexes_to_difficulty
.difficultyepoch
.unwrap_last()
.iter();
let mut results = Vec::with_capacity(end_epoch.to_usize() - start_epoch.to_usize() + 1);
let mut prev_difficulty: Option<f64> = None;
for epoch_usize in start_epoch.to_usize()..=end_epoch.to_usize() {
let epoch = DifficultyEpoch::from(epoch_usize);
let epoch_height = epoch_to_height_iter.get(epoch).unwrap_or_default();
// Skip epochs before our start height but track difficulty
if epoch_height.to_usize() < start_height {
prev_difficulty = epoch_to_difficulty_iter.get(epoch).map(|d| *d);
continue;
}
let epoch_timestamp = epoch_to_timestamp_iter.get(epoch).unwrap_or_default();
let epoch_difficulty = *epoch_to_difficulty_iter.get(epoch).unwrap_or_default();
let change_percent = match prev_difficulty {
Some(prev) if prev > 0.0 => ((epoch_difficulty / prev) - 1.0) * 100.0,
_ => 0.0,
};
results.push(DifficultyAdjustmentEntry {
timestamp: epoch_timestamp,
height: epoch_height,
difficulty: epoch_difficulty,
change_percent,
});
prev_difficulty = Some(epoch_difficulty);
}
results
}

View File

@@ -0,0 +1,100 @@
use brk_error::Result;
use brk_types::{DateIndex, DifficultyEntry, HashrateEntry, HashrateSummary, Height, TimePeriod};
use vecdb::{GenericStoredVec, IterableVec, VecIndex};
use super::epochs::iter_difficulty_epochs;
use crate::Query;
impl Query {
    /// Hashrate history (sampled down to roughly <= 200 points) together with
    /// difficulty history and the current hashrate/difficulty, over
    /// `time_period` (or all time when `None`).
    pub fn hashrate(&self, time_period: Option<TimePeriod>) -> Result<HashrateSummary> {
        let indexer = self.indexer();
        let computer = self.computer();
        let current_height = self.height();
        // Get current difficulty
        let current_difficulty = *indexer
            .vecs
            .block
            .height_to_difficulty
            .read_once(current_height)?;
        // Get current hashrate
        let current_dateindex = computer
            .indexes
            .height_to_dateindex
            .read_once(current_height)?;
        let current_hashrate = *computer
            .chain
            .indexes_to_hash_rate
            .dateindex
            .unwrap_last()
            .read_once(current_dateindex)? as u128;
        // Calculate start height based on time period
        let end = current_height.to_usize();
        let start = match time_period {
            Some(tp) => end.saturating_sub(tp.block_count()),
            None => 0,
        };
        // Get hashrate entries using iterators for efficiency
        let start_dateindex = computer
            .indexes
            .height_to_dateindex
            .read_once(Height::from(start))?;
        let end_dateindex = current_dateindex;
        // Sample at regular intervals to avoid too many data points
        let total_days = end_dateindex
            .to_usize()
            .saturating_sub(start_dateindex.to_usize())
            + 1;
        let step = (total_days / 200).max(1); // Max ~200 data points
        // Create iterators for the loop
        let mut hashrate_iter = computer
            .chain
            .indexes_to_hash_rate
            .dateindex
            .unwrap_last()
            .iter();
        let mut timestamp_iter = computer
            .chain
            .timeindexes_to_timestamp
            .dateindex_extra
            .unwrap_first()
            .iter();
        let mut hashrates = Vec::with_capacity(total_days / step + 1);
        let mut di = start_dateindex.to_usize();
        while di <= end_dateindex.to_usize() {
            let dateindex = DateIndex::from(di);
            // Skip days where either series has no value.
            if let (Some(hr), Some(timestamp)) =
                (hashrate_iter.get(dateindex), timestamp_iter.get(dateindex))
            {
                hashrates.push(HashrateEntry {
                    timestamp,
                    avg_hashrate: (*hr) as u128,
                });
            }
            di += step;
        }
        // Get difficulty adjustments within the period
        let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
            .into_iter()
            .map(|e| DifficultyEntry {
                timestamp: e.timestamp,
                difficulty: e.difficulty,
                height: e.height,
            })
            .collect();
        Ok(HashrateSummary {
            hashrates,
            difficulty,
            current_hashrate,
            current_difficulty,
        })
    }
}

View File

@@ -0,0 +1,11 @@
mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes;
mod dateindex_iter;
mod difficulty;
mod difficulty_adjustments;
mod epochs;
mod hashrate;
mod pools;
mod reward_stats;

View File

@@ -0,0 +1,171 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, IterableVec, VecIndex};
use crate::Query;
impl Query {
/// Per-pool block counts and shares over `time_period`, ranked by blocks
/// mined (descending).
///
/// Each pool's count is derived from its cumulative blocks-mined series:
/// `cumulative(end) - cumulative(start - 1)`.
pub fn mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
    let computer = self.computer();
    let current_height = self.height();
    let end = current_height.to_usize();
    // No blocks indexed yet
    if computer.pools.height_to_pool.len() == 0 {
        return Ok(PoolsSummary {
            pools: vec![],
            block_count: 0,
            last_estimated_hashrate: 0,
        });
    }
    // Calculate start height based on time period
    let start = end.saturating_sub(time_period.block_count());
    let pools = pools();
    let mut pool_data: Vec<(&'static brk_types::Pool, u32)> = Vec::new();
    // For each pool, get cumulative count at end and start, subtract to get range count
    for (pool_id, pool_vecs) in &computer.pools.vecs {
        let mut cumulative = pool_vecs
            .indexes_to_blocks_mined
            .height_extra
            .unwrap_cumulative()
            .iter();
        let count_at_end: u32 = *cumulative.get(current_height).unwrap_or_default();
        let count_at_start: u32 = if start == 0 {
            0
        } else {
            *cumulative.get(Height::from(start - 1)).unwrap_or_default()
        };
        let block_count = count_at_end.saturating_sub(count_at_start);
        // Only include pools that mined at least one block in the period
        if block_count > 0 {
            pool_data.push((pools.get(*pool_id), block_count));
        }
    }
    // Sort by block count descending
    pool_data.sort_by(|a, b| b.1.cmp(&a.1));
    let total_blocks: u32 = pool_data.iter().map(|(_, count)| count).sum();
    // Build stats with ranks
    let pool_stats: Vec<PoolStats> = pool_data
        .into_iter()
        .enumerate()
        .map(|(idx, (pool, block_count))| {
            let share = if total_blocks > 0 {
                block_count as f64 / total_blocks as f64
            } else {
                0.0
            };
            // Rank is the 1-based position in the sorted order.
            PoolStats::new(pool, block_count, (idx + 1) as u32, share)
        })
        .collect();
    // TODO: Calculate actual hashrate from difficulty
    let last_estimated_hashrate = 0u128;
    Ok(PoolsSummary {
        pools: pool_stats,
        block_count: total_blocks,
        last_estimated_hashrate,
    })
}
pub fn all_pools(&self) -> Vec<PoolInfo> {
pools().iter().map(PoolInfo::from).collect()
}
pub fn pool_detail(&self, slug: PoolSlug) -> Result<PoolDetail> {
let computer = self.computer();
let current_height = self.height();
let end = current_height.to_usize();
let pools_list = pools();
let pool = pools_list.get(slug);
// Get pool vecs for this specific pool
let pool_vecs = computer
.pools
.vecs
.get(&slug)
.ok_or_else(|| Error::Str("Pool data not found"))?;
let mut cumulative = pool_vecs
.indexes_to_blocks_mined
.height_extra
.unwrap_cumulative()
.iter();
// Get total blocks (all time)
let total_all: u32 = *cumulative.get(current_height).unwrap_or_default();
// Get blocks for 24h (144 blocks)
let start_24h = end.saturating_sub(144);
let count_before_24h: u32 = if start_24h == 0 {
0
} else {
*cumulative
.get(Height::from(start_24h - 1))
.unwrap_or_default()
};
let total_24h = total_all.saturating_sub(count_before_24h);
// Get blocks for 1w (1008 blocks)
let start_1w = end.saturating_sub(1008);
let count_before_1w: u32 = if start_1w == 0 {
0
} else {
*cumulative
.get(Height::from(start_1w - 1))
.unwrap_or_default()
};
let total_1w = total_all.saturating_sub(count_before_1w);
// Calculate total network blocks for share calculation
let network_blocks_all = (end + 1) as u32;
let network_blocks_24h = (end - start_24h + 1) as u32;
let network_blocks_1w = (end - start_1w + 1) as u32;
let share_all = if network_blocks_all > 0 {
total_all as f64 / network_blocks_all as f64
} else {
0.0
};
let share_24h = if network_blocks_24h > 0 {
total_24h as f64 / network_blocks_24h as f64
} else {
0.0
};
let share_1w = if network_blocks_1w > 0 {
total_1w as f64 / network_blocks_1w as f64
} else {
0.0
};
Ok(PoolDetail {
pool: PoolDetailInfo::from(pool),
block_count: PoolBlockCounts {
all: total_all,
day: total_24h,
week: total_1w,
},
block_share: PoolBlockShares {
all: share_all,
day: share_24h,
week: share_1w,
},
estimated_hashrate: 0, // TODO: Calculate from share and network hashrate
reported_hashrate: None,
})
}
}

View File

@@ -0,0 +1,66 @@
use brk_error::Result;
use brk_types::{Height, RewardStats, Sats};
use vecdb::{IterableVec, VecIndex};
use crate::Query;
impl Query {
    /// Aggregates the coinbase reward, total fees and transaction count over
    /// the last `block_count` blocks ending at the current tip (inclusive).
    ///
    /// # Errors
    /// Propagates storage iteration errors.
    pub fn reward_stats(&self, block_count: usize) -> Result<RewardStats> {
        let computer = self.computer();
        let current_height = self.height();
        let end_block = current_height;
        // Saturate both subtractions: `block_count == 0` previously underflowed
        // (`block_count - 1` on a usize, panicking in debug builds), and a
        // window larger than the chain must clamp to the genesis block.
        let start_block = Height::from(
            current_height
                .to_usize()
                .saturating_sub(block_count.saturating_sub(1)),
        );
        let mut coinbase_iter = computer
            .chain
            .indexes_to_coinbase
            .sats
            .height
            .as_ref()
            .unwrap()
            .iter();
        let mut fee_iter = computer
            .chain
            .indexes_to_fee
            .sats
            .height
            .unwrap_sum()
            .iter();
        let mut tx_count_iter = computer
            .chain
            .indexes_to_tx_count
            .height
            .as_ref()
            .unwrap()
            .iter();
        let mut total_reward = Sats::ZERO;
        let mut total_fee = Sats::ZERO;
        let mut total_tx: u64 = 0;
        // Sum per-block values over the inclusive [start_block, end_block] range;
        // missing entries are simply skipped.
        for height in start_block.to_usize()..=end_block.to_usize() {
            let h = Height::from(height);
            if let Some(coinbase) = coinbase_iter.get(h) {
                total_reward += coinbase;
            }
            if let Some(fee) = fee_iter.get(h) {
                total_fee += fee;
            }
            if let Some(tx_count) = tx_count_iter.get(h) {
                total_tx += *tx_count;
            }
        }
        Ok(RewardStats {
            start_block,
            end_block,
            total_reward,
            total_fee,
            total_tx,
        })
    }
}

View File

@@ -0,0 +1,11 @@
//! Query implementation modules.
//!
//! Each module extends `Query` with domain-specific methods using `impl Query` blocks.
mod address;
mod block;
mod mempool;
mod mining;
mod transaction;
// Re-export the block-transactions page-size constant defined in `block`.
pub use block::BLOCK_TXS_PAGE_SIZE;

View File

@@ -0,0 +1,405 @@
use std::{io::Cursor, str::FromStr};
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{
Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxOutspend, TxStatus, Txid, TxidPath,
TxidPrefix, Vin, Vout, Weight,
};
use vecdb::{GenericStoredVec, TypedVecIterator};
use crate::Query;
impl Query {
pub fn transaction(&self, TxidPath { txid }: TxidPath) -> Result<Transaction> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.tx().clone());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_by_index(txindex)
}
pub fn transaction_status(&self, TxidPath { txid }: TxidPath) -> Result<TxStatus> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& mempool.get_txs().contains_key(&txid)
{
return Ok(TxStatus::UNCONFIRMED);
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Get block info for status
let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
Ok(TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
})
}
pub fn transaction_hex(&self, TxidPath { txid }: TxidPath) -> Result<String> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
return Ok(tx_with_hex.hex().to_string());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_hex_by_index(txindex)
}
pub fn outspend(&self, TxidPath { txid }: TxidPath, vout: Vout) -> Result<TxOutspend> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// Mempool outputs are unspent in on-chain terms
if let Some(mempool) = self.mempool()
&& mempool.get_txs().contains_key(&txid)
{
return Ok(TxOutspend::UNSPENT);
}
// Look up confirmed transaction
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Calculate txoutindex
let first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex)?;
let txoutindex = first_txoutindex + vout;
// Look up spend status
let computer = self.computer();
let txinindex = computer
.stateful
.txoutindex_to_txinindex
.read_once(txoutindex)?;
if txinindex == TxInIndex::UNSPENT {
return Ok(TxOutspend::UNSPENT);
}
self.outspend_details(txinindex)
}
pub fn outspends(&self, TxidPath { txid }: TxidPath) -> Result<Vec<TxOutspend>> {
let Ok(txid) = bitcoin::Txid::from_str(&txid) else {
return Err(Error::InvalidTxid);
};
let txid = Txid::from(txid);
// Mempool outputs are unspent in on-chain terms
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(&txid)
{
let output_count = tx_with_hex.tx().output.len();
return Ok(vec![TxOutspend::UNSPENT; output_count]);
}
// Look up confirmed transaction
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(txindex)) = indexer
.stores
.txidprefix_to_txindex
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Get output range
let first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex)?;
let next_first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex.incremented())?;
let output_count = usize::from(next_first_txoutindex) - usize::from(first_txoutindex);
// Get spend status for each output
let computer = self.computer();
let mut txoutindex_to_txinindex_iter = computer.stateful.txoutindex_to_txinindex.iter()?;
let mut outspends = Vec::with_capacity(output_count);
for i in 0..output_count {
let txoutindex = first_txoutindex + Vout::from(i);
let txinindex = txoutindex_to_txinindex_iter.get_unwrap(txoutindex);
if txinindex == TxInIndex::UNSPENT {
outspends.push(TxOutspend::UNSPENT);
} else {
outspends.push(self.outspend_details(txinindex)?);
}
}
Ok(outspends)
}
// === Helper methods ===
pub fn transaction_by_index(&self, txindex: TxIndex) -> Result<Transaction> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
// Get tx metadata using read_once for single lookups
let txid = indexer.vecs.tx.txindex_to_txid.read_once(txindex)?;
let height = indexer.vecs.tx.txindex_to_height.read_once(txindex)?;
let version = indexer.vecs.tx.txindex_to_txversion.read_once(txindex)?;
let lock_time = indexer.vecs.tx.txindex_to_rawlocktime.read_once(txindex)?;
let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
let first_txinindex = indexer
.vecs
.tx
.txindex_to_first_txinindex
.read_once(txindex)?;
let position = computer.blks.txindex_to_position.read_once(txindex)?;
// Get block info for status
let block_hash = indexer.vecs.block.height_to_blockhash.read_once(height)?;
let block_time = indexer.vecs.block.height_to_timestamp.read_once(height)?;
// Read and decode the raw transaction from blk file
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
let mut cursor = Cursor::new(buffer);
let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
.map_err(|_| Error::Str("Failed to decode transaction"))?;
// For iterating through inputs, we need iterators (multiple lookups)
let mut txindex_to_txid_iter = indexer.vecs.tx.txindex_to_txid.iter()?;
let mut txindex_to_first_txoutindex_iter =
indexer.vecs.tx.txindex_to_first_txoutindex.iter()?;
let mut txinindex_to_outpoint_iter = indexer.vecs.txin.txinindex_to_outpoint.iter()?;
let mut txoutindex_to_value_iter = indexer.vecs.txout.txoutindex_to_value.iter()?;
// Build inputs with prevout information
let input: Vec<TxIn> = tx
.input
.iter()
.enumerate()
.map(|(i, txin)| {
let txinindex = first_txinindex + i;
let outpoint = txinindex_to_outpoint_iter.get_unwrap(txinindex);
let is_coinbase = outpoint.is_coinbase();
// Get prevout info if not coinbase
let (prev_txid, prev_vout, prevout) = if is_coinbase {
(Txid::COINBASE, Vout::MAX, None)
} else {
let prev_txindex = outpoint.txindex();
let prev_vout = outpoint.vout();
let prev_txid = txindex_to_txid_iter.get_unwrap(prev_txindex);
// Calculate the txoutindex for the prevout
let prev_first_txoutindex =
txindex_to_first_txoutindex_iter.get_unwrap(prev_txindex);
let prev_txoutindex = prev_first_txoutindex + prev_vout;
// Get the value of the prevout
let prev_value = txoutindex_to_value_iter.get_unwrap(prev_txoutindex);
let prevout = Some(TxOut::from((
bitcoin::ScriptBuf::new(), // Placeholder - would need to reconstruct
prev_value,
)));
(prev_txid, prev_vout, prevout)
};
TxIn {
txid: prev_txid,
vout: prev_vout,
prevout,
script_sig: txin.script_sig.clone(),
script_sig_asm: (),
is_coinbase,
sequence: txin.sequence.0,
inner_redeem_script_asm: (),
}
})
.collect();
// Calculate weight before consuming tx.output
let weight = Weight::from(tx.weight());
// Calculate sigop cost
let total_sigop_cost = tx.total_sigop_cost(|_| None);
// Build outputs
let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
// Build status
let status = TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
};
let mut transaction = Transaction {
index: Some(txindex),
txid,
version,
lock_time,
total_size: *total_size as usize,
weight,
total_sigop_cost,
fee: Sats::ZERO, // Will be computed below
input,
output,
status,
};
// Compute fee from inputs - outputs
transaction.compute_fee();
Ok(transaction)
}
fn transaction_hex_by_index(&self, txindex: TxIndex) -> Result<String> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
let total_size = indexer.vecs.tx.txindex_to_total_size.read_once(txindex)?;
let position = computer.blks.txindex_to_position.read_once(txindex)?;
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
Ok(buffer.to_lower_hex_string())
}
fn outspend_details(&self, txinindex: TxInIndex) -> Result<TxOutspend> {
let indexer = self.indexer();
// Look up spending txindex directly
let spending_txindex = indexer
.vecs
.txin
.txinindex_to_txindex
.read_once(txinindex)?;
// Calculate vin
let spending_first_txinindex = indexer
.vecs
.tx
.txindex_to_first_txinindex
.read_once(spending_txindex)?;
let vin = Vin::from(usize::from(txinindex) - usize::from(spending_first_txinindex));
// Get spending tx details
let spending_txid = indexer
.vecs
.tx
.txindex_to_txid
.read_once(spending_txindex)?;
let spending_height = indexer
.vecs
.tx
.txindex_to_height
.read_once(spending_txindex)?;
let block_hash = indexer
.vecs
.block
.height_to_blockhash
.read_once(spending_height)?;
let block_time = indexer
.vecs
.block
.height_to_timestamp
.read_once(spending_height)?;
Ok(TxOutspend {
spent: true,
txid: Some(spending_txid),
vin: Some(vin),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
block_hash: Some(block_hash),
block_time: Some(block_time),
}),
})
}
}