mirror of https://github.com/bitcoinresearchkit/brk.git (synced 2026-04-28 16:49:58 -07:00)
server: ms endpoint fixes
@@ -4,8 +4,8 @@ use bitcoin::{Network, PublicKey, ScriptBuf};
use brk_error::{Error, Result};
use brk_types::{
    Addr, AddrBytes, AddrChainStats, AddrHash, AddrIndexOutPoint, AddrIndexTxIndex, AddrStats,
    AnyAddrDataIndexEnum, OutputType, Sats, Transaction, TxIndex, TxStatus, Txid, TypeIndex, Unit,
    Utxo, Vout,
    AnyAddrDataIndexEnum, Height, OutputType, Transaction, TxIndex, TxStatus, Txid, TypeIndex,
    Unit, Utxo, Vout,
};
use vecdb::{ReadableVec, VecIndex};

@@ -186,25 +186,19 @@ impl Query {
        let first_txout_index_reader = vecs.transactions.first_txout_index.reader();
        let value_reader = vecs.outputs.value.reader();
        let blockhash_reader = vecs.blocks.blockhash.reader();
        let mut height_cursor = vecs.transactions.height.cursor();
        let mut block_ts_cursor = vecs.blocks.timestamp.cursor();

        let utxos: Vec<Utxo> = outpoints
            .into_iter()
            .map(|(tx_index, vout)| {
                let txid: Txid = txid_reader.get(tx_index.to_usize());
                let height = vecs
                    .transactions
                    .height
                    .collect_one_at(tx_index.to_usize())
                    .unwrap();
                let txid = txid_reader.get(tx_index.to_usize());
                let height = height_cursor.get(tx_index.to_usize()).unwrap();
                let first_txout_index = first_txout_index_reader.get(tx_index.to_usize());
                let txout_index = first_txout_index + vout;
                let value: Sats = value_reader.get(usize::from(txout_index));
                let value = value_reader.get(usize::from(txout_index));
                let block_hash = blockhash_reader.get(usize::from(height));
                let block_time = vecs
                    .blocks
                    .timestamp
                    .collect_one_at(usize::from(height))
                    .unwrap();
                let block_time = block_ts_cursor.get(height.to_usize()).unwrap();

                Utxo {
                    txid,
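
The reader-to-cursor swap above targets chunk-compressed vectors: a plain point read decodes a page per call, while a cursor remembers the last decoded page, so the nearby per-tx `height` and timestamp lookups inside the map hit it for free. A minimal sketch of that memoization, over a hypothetical chunked store (not vecdb's actual types):

// Sketch: a cursor memoizing the last decompressed page of a chunked vector.
struct ChunkedVec {
    pages: Vec<Vec<u8>>, // compressed pages
    page_len: usize,     // values per page
}

impl ChunkedVec {
    fn decode_page(&self, id: usize) -> Vec<u64> {
        // Stand-in decoder; a real store would run its page codec here.
        self.pages[id].iter().map(|&b| b as u64).collect()
    }
}

struct Cursor<'a> {
    vec: &'a ChunkedVec,
    cached: Option<(usize, Vec<u64>)>, // (page id, decoded values)
}

impl<'a> Cursor<'a> {
    fn get(&mut self, index: usize) -> Option<u64> {
        let (page, offset) = (index / self.vec.page_len, index % self.vec.page_len);
        if self.cached.as_ref().map(|(id, _)| *id) != Some(page) {
            if page >= self.vec.pages.len() {
                return None;
            }
            // Miss: decode once, then neighbors in the same page are free.
            self.cached = Some((page, self.vec.decode_page(page)));
        }
        self.cached.as_ref()?.1.get(offset).copied()
    }
}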
@@ -247,6 +241,29 @@ impl Query {
        Ok(txids)
    }

    /// Height of the last on-chain activity for an address (last tx_index → height).
    pub fn addr_last_activity_height(&self, addr: &Addr) -> Result<Height> {
        let (output_type, type_index) = self.resolve_addr(addr)?;
        let store = self
            .indexer()
            .stores
            .addr_type_to_addr_index_and_tx_index
            .get(output_type)
            .unwrap();
        let prefix = u32::from(type_index).to_be_bytes();
        let last_tx_index = store
            .prefix(prefix)
            .next_back()
            .map(|(key, _): (AddrIndexTxIndex, Unit)| key.tx_index())
            .ok_or(Error::UnknownAddr)?;
        self.indexer()
            .vecs
            .transactions
            .height
            .collect_one(last_tx_index)
            .ok_or(Error::UnknownAddr)
    }

    /// Resolve an address string to its output type and type_index
    fn resolve_addr(&self, addr: &Addr) -> Result<(OutputType, TypeIndex)> {
        let stores = &self.indexer().stores;
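
The new `addr_last_activity_height` rests on the store's key order: entries are keyed by the big-endian `type_index` prefix followed by the transaction index, so the last key under the prefix is the address's most recent transaction. A hedged sketch of the same reverse-prefix scan over a plain ordered map (brk's `.prefix()` iterator is assumed to behave like this bounded range):

use std::collections::BTreeMap;

// Sketch: newest entry under a 4-byte big-endian prefix in an ordered map.
fn last_under_prefix(map: &BTreeMap<Vec<u8>, u64>, prefix: [u8; 4]) -> Option<u64> {
    // First key past the prefix region: big-endian layout means "+1".
    // (The all-0xff prefix would need an unbounded upper range; ignored here.)
    let upper = u32::from_be_bytes(prefix).checked_add(1)?.to_be_bytes();
    map.range(prefix.to_vec()..upper.to_vec())
        .next_back() // ordered map ⇒ the last key under the prefix is the newest tx
        .map(|(_, v)| *v)
}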
@@ -1,9 +1,10 @@
use bitcoin::consensus::Decodable;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_reader::Reader;
use brk_types::{
    BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1, BlockPool,
    FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
    BlkPosition, BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1,
    BlockPool, FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};

@@ -123,12 +124,16 @@ impl Query {
            blocks.push(BlockInfo {
                id: blockhashes[i].clone(),
                height: Height::from(begin + i),
                header,
                version: header.version,
                timestamp: timestamps[i],
                tx_count,
                size: *sizes[i],
                weight: weights[i],
                merkle_root: header.merkle_root,
                previous_block_hash: header.previous_block_hash,
                median_time,
                nonce: header.nonce,
                bits: header.bits,
                difficulty: *difficulties[i],
            });
        }
@@ -138,7 +143,7 @@ impl Query {

    pub(crate) fn blocks_v1_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfoV1>> {
        if begin >= end {
            return Ok(Vec::new());
            return Ok(vec![]);
        }

        let count = end - begin;
@@ -304,12 +309,16 @@ impl Query {
            let info = BlockInfo {
                id: blockhashes[i].clone(),
                height: Height::from(begin + i),
                header,
                version: header.version,
                timestamp: timestamps[i],
                tx_count,
                size,
                weight,
                merkle_root: header.merkle_root,
                previous_block_hash: header.previous_block_hash,
                median_time,
                nonce: header.nonce,
                bits: header.bits,
                difficulty: *difficulties[i],
            };

@@ -333,6 +342,7 @@ impl Query {
                id: pool.unique_id(),
                name: pool.name.to_string(),
                slug: pool_slug,
                miner_names: None,
            },
            avg_fee: Sats::from(if non_coinbase > 0 {
                total_fees_u64 / non_coinbase
@@ -441,8 +451,8 @@ impl Query {
    }

    fn parse_coinbase_tx(
        reader: &brk_reader::Reader,
        position: brk_types::BlkPosition,
        reader: &Reader,
        position: BlkPosition,
    ) -> (String, Option<String>, Vec<String>, String, String) {
        let raw_bytes = match reader.read_raw_bytes(position, 1000) {
            Ok(bytes) => bytes,
@@ -463,7 +473,14 @@ impl Query {
        let coinbase_signature_ascii = tx
            .input
            .first()
            .map(|input| input.script_sig.as_bytes().iter().map(|&b| b as char).collect::<String>())
            .map(|input| {
                input
                    .script_sig
                    .as_bytes()
                    .iter()
                    .map(|&b| b as char)
                    .collect::<String>()
            })
            .unwrap_or_default();

        let coinbase_addresses: Vec<String> = tx

@@ -67,7 +67,7 @@ impl Query {
        // Convert timestamp to ISO 8601 format
        let ts_secs: i64 = (*best_ts).into();
        let iso_timestamp = JiffTimestamp::from_second(ts_secs)
            .map(|t| t.to_string())
            .map(|t| t.strftime("%Y-%m-%dT%H:%M:%S%.3fZ").to_string())
            .unwrap_or_else(|_| best_ts.to_string());

        Ok(BlockTimestamp {

@@ -1,6 +1,12 @@
use std::io::Cursor;

use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{BlockHash, Height, Transaction, TxIndex, Txid};
use vecdb::{AnyVec, ReadableVec};
use brk_types::{
    BlockHash, Height, OutputType, Sats, Timestamp, Transaction, TxIn, TxIndex, TxOut, TxStatus,
    Txid, Vout, Weight,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};

use super::BLOCK_TXS_PAGE_SIZE;
use crate::Query;
@@ -13,7 +19,13 @@ impl Query {

    pub fn block_txs(&self, hash: &BlockHash, start_index: TxIndex) -> Result<Vec<Transaction>> {
        let height = self.height_by_hash(hash)?;
        self.block_txs_by_height(height, start_index.into())
        let (first, tx_count) = self.block_tx_range(height)?;
        let start: usize = start_index.into();
        if start >= tx_count {
            return Ok(Vec::new());
        }
        let count = BLOCK_TXS_PAGE_SIZE.min(tx_count - start);
        self.transactions_by_range(first + start, count)
    }

    pub fn block_txid_at_index(&self, hash: &BlockHash, index: TxIndex) -> Result<Txid> {
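
The repaged `block_txs` above reduces to simple index arithmetic: clamp the requested offset, then take at most one page. A standalone illustration of the same clamping, with made-up numbers:

// Sketch: page-window arithmetic as used by the new block_txs path.
fn page_window(first: usize, tx_count: usize, start: usize, page: usize) -> Option<(usize, usize)> {
    if start >= tx_count {
        return None; // past the end of the block -> empty page
    }
    let count = page.min(tx_count - start); // never read past the last tx
    Some((first + start, count))            // (raw start index, length)
}

fn main() {
    // Block with 2500 txs whose raw indices begin at 1_000_000, page size 25:
    assert_eq!(page_window(1_000_000, 2500, 2490, 25), Some((1_002_490, 10)));
    assert_eq!(page_window(1_000_000, 2500, 2500, 25), None);
}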
@@ -21,111 +33,198 @@ impl Query {
        self.block_txid_at_index_by_height(height, index.into())
    }

    // === Helper methods ===
    // === Bulk transaction read ===

    pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
        let indexer = self.indexer();

        let max_height = self.indexed_height();
        if height > max_height {
            return Err(Error::OutOfRange("Block height out of range".into()));
        }

        let first_tx_index = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height)
            .unwrap();
        let next_first_tx_index = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height.incremented())
            .unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));

        let first: usize = first_tx_index.into();
        let next: usize = next_first_tx_index.into();

        let txids: Vec<Txid> = indexer.vecs.transactions.txid.collect_range_at(first, next);

        Ok(txids)
    }

    fn block_txs_by_height(&self, height: Height, start_index: usize) -> Result<Vec<Transaction>> {
        let indexer = self.indexer();

        let max_height = self.indexed_height();
        if height > max_height {
            return Err(Error::OutOfRange("Block height out of range".into()));
        }

        let first_tx_index = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height)
            .unwrap();
        let next_first_tx_index = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height.incremented())
            .unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));

        let first: usize = first_tx_index.into();
        let next: usize = next_first_tx_index.into();
        let tx_count = next - first;

        if start_index >= tx_count {
    /// Batch-read `count` consecutive transactions starting at raw index `start`.
    /// Block info is cached per unique height — free for same-block batches.
    pub fn transactions_by_range(&self, start: usize, count: usize) -> Result<Vec<Transaction>> {
        if count == 0 {
            return Ok(Vec::new());
        }

        let end_index = (start_index + BLOCK_TXS_PAGE_SIZE).min(tx_count);
        let count = end_index - start_index;
        let indexer = self.indexer();
        let reader = self.reader();
        let end = start + count;

        // 7 range reads instead of count * 7 point reads
        let txids: Vec<Txid> = indexer.vecs.transactions.txid.collect_range_at(start, end);
        let heights: Vec<Height> = indexer.vecs.transactions.height.collect_range_at(start, end);
        let versions = indexer.vecs.transactions.tx_version.collect_range_at(start, end);
        let lock_times = indexer.vecs.transactions.raw_locktime.collect_range_at(start, end);
        let total_sizes = indexer.vecs.transactions.total_size.collect_range_at(start, end);
        let first_txin_indices = indexer
            .vecs
            .transactions
            .first_txin_index
            .collect_range_at(start, end);
        let positions = indexer.vecs.transactions.position.collect_range_at(start, end);

        // Readers for prevout lookups (created once)
        let txid_reader = indexer.vecs.transactions.txid.reader();
        let first_txout_index_reader = indexer.vecs.transactions.first_txout_index.reader();
        let value_reader = indexer.vecs.outputs.value.reader();
        let output_type_reader = indexer.vecs.outputs.output_type.reader();
        let type_index_reader = indexer.vecs.outputs.type_index.reader();
        let addr_readers = indexer.vecs.addrs.addr_readers();

        // Block info cache — for same-block batches, read once
        let mut cached_block: Option<(Height, BlockHash, Timestamp)> = None;

        let mut txs = Vec::with_capacity(count);
        for i in start_index..end_index {
            let tx_index = TxIndex::from(first + i);
            let tx = self.transaction_by_index(tx_index)?;
            txs.push(tx);

        for i in 0..count {
            let height = heights[i];

            // Reuse block info if same height as previous tx
            let (block_hash, block_time) =
                if let Some((h, ref bh, bt)) = cached_block && h == height {
                    (bh.clone(), bt)
                } else {
                    let bh = indexer.vecs.blocks.blockhash.read_once(height)?;
                    let bt = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();
                    cached_block = Some((height, bh.clone(), bt));
                    (bh, bt)
                };

            // Decode raw transaction from blk file
            let buffer = reader.read_raw_bytes(positions[i], *total_sizes[i] as usize)?;
            let tx = bitcoin::Transaction::consensus_decode(&mut Cursor::new(buffer))
                .map_err(|_| Error::Parse("Failed to decode transaction".into()))?;

            // Batch-read outpoints for this tx's inputs
            let outpoints = indexer.vecs.inputs.outpoint.collect_range_at(
                usize::from(first_txin_indices[i]),
                usize::from(first_txin_indices[i]) + tx.input.len(),
            );

            let input: Vec<TxIn> = tx
                .input
                .iter()
                .enumerate()
                .map(|(j, txin)| {
                    let outpoint = outpoints[j];
                    let is_coinbase = outpoint.is_coinbase();

                    let (prev_txid, prev_vout, prevout) = if is_coinbase {
                        (Txid::COINBASE, Vout::MAX, None)
                    } else {
                        let prev_tx_index = outpoint.tx_index();
                        let prev_vout = outpoint.vout();
                        let prev_txid = txid_reader.get(prev_tx_index.to_usize());
                        let prev_first_txout_index =
                            first_txout_index_reader.get(prev_tx_index.to_usize());
                        let prev_txout_index = prev_first_txout_index + prev_vout;
                        let prev_value = value_reader.get(usize::from(prev_txout_index));
                        let prev_output_type: OutputType =
                            output_type_reader.get(usize::from(prev_txout_index));
                        let prev_type_index =
                            type_index_reader.get(usize::from(prev_txout_index));
                        let script_pubkey =
                            addr_readers.script_pubkey(prev_output_type, prev_type_index);
                        (
                            prev_txid,
                            prev_vout,
                            Some(TxOut::from((script_pubkey, prev_value))),
                        )
                    };

                    let witness = txin
                        .witness
                        .iter()
                        .map(|w| w.to_lower_hex_string())
                        .collect();

                    TxIn {
                        txid: prev_txid,
                        vout: prev_vout,
                        prevout,
                        script_sig: txin.script_sig.clone(),
                        script_sig_asm: (),
                        witness,
                        is_coinbase,
                        sequence: txin.sequence.0,
                        inner_redeem_script_asm: (),
                        inner_witness_script_asm: (),
                    }
                })
                .collect();

            let weight = Weight::from(tx.weight());
            let total_sigop_cost = tx.total_sigop_cost(|_| None);
            let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();

            let mut transaction = Transaction {
                index: Some(TxIndex::from(start + i)),
                txid: txids[i].clone(),
                version: versions[i],
                lock_time: lock_times[i],
                total_size: *total_sizes[i] as usize,
                weight,
                total_sigop_cost,
                fee: Sats::ZERO,
                input,
                output,
                status: TxStatus {
                    confirmed: true,
                    block_height: Some(height),
                    block_hash: Some(block_hash),
                    block_time: Some(block_time),
                },
            };

            transaction.compute_fee();
            txs.push(transaction);
        }

        Ok(txs)
    }

    fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
        let indexer = self.indexer();
    // === Helper methods ===

        let max_height = self.indexed_height();
        if height > max_height {
    pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
        let (first, tx_count) = self.block_tx_range(height)?;
        Ok(self
            .indexer()
            .vecs
            .transactions
            .txid
            .collect_range_at(first, first + tx_count))
    }

    fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
        let (first, tx_count) = self.block_tx_range(height)?;
        if index >= tx_count {
            return Err(Error::OutOfRange("Transaction index out of range".into()));
        }
        Ok(self
            .indexer()
            .vecs
            .transactions
            .txid
            .reader()
            .get(first + index))
    }

    /// Returns (first_tx_raw_index, tx_count) for a block at `height`.
    fn block_tx_range(&self, height: Height) -> Result<(usize, usize)> {
        let indexer = self.indexer();
        if height > self.indexed_height() {
            return Err(Error::OutOfRange("Block height out of range".into()));
        }

        let first_tx_index = indexer
        let first: usize = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height)
            .unwrap();
        let next_first_tx_index = indexer
            .unwrap()
            .into();
        let next: usize = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(height.incremented())
            .unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));

        let first: usize = first_tx_index.into();
        let next: usize = next_first_tx_index.into();
        let tx_count = next - first;

        if index >= tx_count {
            return Err(Error::OutOfRange("Transaction index out of range".into()));
        }

        let tx_index = first + index;
        let txid = indexer.vecs.transactions.txid.reader().get(tx_index);

        Ok(txid)
            .unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()))
            .into();
        Ok((first, next - first))
    }
}

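
The "7 range reads instead of count * 7 point reads" comment is the heart of this hunk: per-field columnar vectors pay a page decode per point read, so a batch over [start, end) should pull each column once. A toy model of the access-pattern difference, with hypothetical `collect_range` semantics rather than vecdb's actual API:

// Sketch: one range read per column vs. one point read per row per column.
// With chunk-compressed columns, the batched side decodes each page once;
// the pointwise side can decode the same page `count` times.
struct Column { data: Vec<u64> }

impl Column {
    fn collect_range(&self, start: usize, end: usize) -> Vec<u64> {
        self.data[start..end].to_vec() // stand-in for a single bulk decode
    }
    fn point(&self, i: usize) -> u64 {
        self.data[i] // stand-in for a per-call page decode
    }
}

fn batched(cols: &[Column; 3], start: usize, count: usize) -> Vec<[u64; 3]> {
    let end = start + count;
    // 3 range reads...
    let (a, b, c) = (
        cols[0].collect_range(start, end),
        cols[1].collect_range(start, end),
        cols[2].collect_range(start, end),
    );
    (0..count).map(|i| [a[i], b[i], c[i]]).collect()
}

fn pointwise(cols: &[Column; 3], start: usize, count: usize) -> Vec<[u64; 3]> {
    // ...instead of count * 3 point reads.
    (0..count)
        .map(|i| [cols[0].point(start + i), cols[1].point(start + i), cols[2].point(start + i)])
        .collect()
}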
@@ -1,7 +1,9 @@
use std::cmp::Ordering;

use brk_error::{Error, Result};
use brk_types::{
    CpfpEntry, CpfpInfo, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees, Txid,
    TxidParam, TxidPrefix, Weight,
    CpfpEntry, CpfpInfo, FeeRate, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees,
    Txid, TxidParam, TxidPrefix, Weight,
};

use crate::Query;
@@ -86,10 +88,22 @@ impl Query {

        let effective_fee_per_vsize = entry.effective_fee_rate();

        let best_descendant = descendants
            .iter()
            .max_by(|a, b| {
                FeeRate::from((a.fee, a.weight))
                    .partial_cmp(&FeeRate::from((b.fee, b.weight)))
                    .unwrap_or(Ordering::Equal)
            })
            .cloned();

        Ok(CpfpInfo {
            ancestors,
            best_descendant,
            descendants,
            effective_fee_per_vsize,
            fee: entry.fee,
            adjusted_vsize: entry.vsize,
        })
    }

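The `best_descendant` pick above uses `partial_cmp` with an `Ordering::Equal` fallback because a float-backed fee rate is only `PartialOrd`, not `Ord`. A self-contained version of the same selection (plain structs standing in for the brk types):

// Sketch: selecting a max by a float-valued key, NaN-safe.
use std::cmp::Ordering;

#[derive(Clone, Debug)]
struct Entry { fee: u64, weight: u64 }

impl Entry {
    fn fee_rate(&self) -> f64 { self.fee as f64 / self.weight as f64 }
}

fn best(entries: &[Entry]) -> Option<Entry> {
    entries
        .iter()
        .max_by(|a, b| {
            a.fee_rate()
                .partial_cmp(&b.fee_rate())
                .unwrap_or(Ordering::Equal) // fallback if either rate is NaN
        })
        .cloned()
}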
@@ -1,49 +1,22 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, Height, Sats, TimePeriod};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{BlockFeesEntry, TimePeriod};

use super::day1_iter::Day1Iter;
use super::block_window::BlockWindow;
use crate::Query;

impl Query {
    pub fn block_fees(&self, time_period: TimePeriod) -> Result<Vec<BlockFeesEntry>> {
        let computer = self.computer();
        let current_height = self.height();
        let start = current_height
            .to_usize()
            .saturating_sub(time_period.block_count());

        let iter = Day1Iter::new(computer, start, current_height.to_usize());

        let cumulative = &computer.mining.rewards.fees.cumulative.sats.height;
        let first_height = &computer.indexes.day1.first_height;

        Ok(iter.collect(|di, ts, h| {
            let h_start = first_height.collect_one(di)?;
            let h_end = first_height
                .collect_one(di + 1_usize)
                .unwrap_or(Height::from(current_height.to_usize() + 1));
            let block_count = h_end.to_usize() - h_start.to_usize();
            if block_count == 0 {
                return None;
            }

            let cumulative_end = cumulative.collect_one_at(h_end.to_usize() - 1)?;
            let cumulative_start = if h_start.to_usize() > 0 {
                cumulative
                    .collect_one_at(h_start.to_usize() - 1)
                    .unwrap_or(Sats::ZERO)
            } else {
                Sats::ZERO
            };
            let daily_sum = cumulative_end - cumulative_start;
            let avg_fees = Sats::from(*daily_sum / block_count as u64);

            Some(BlockFeesEntry {
                avg_height: h,
                timestamp: ts,
                avg_fees,
        let bw = BlockWindow::new(self, time_period);
        let cumulative = &self.computer().mining.rewards.fees.cumulative.sats.height;
        Ok(bw
            .cumulative_averages(self, cumulative)
            .into_iter()
            .map(|w| BlockFeesEntry {
                avg_height: w.avg_height,
                timestamp: w.timestamp,
                avg_fees: w.avg_value,
                usd: w.usd,
            })
        }))
            .collect())
    }
}

@@ -1,86 +1,22 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, Height, Sats, TimePeriod};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{BlockRewardsEntry, TimePeriod};

use super::block_window::BlockWindow;
use crate::Query;

impl Query {
    pub fn block_rewards(&self, time_period: TimePeriod) -> Result<Vec<BlockRewardsEntry>> {
        let computer = self.computer();
        let indexer = self.indexer();
        let current_height = self.height().to_usize();
        let start = current_height.saturating_sub(time_period.block_count());

        let coinbase_vec = &computer.mining.rewards.coinbase.block.sats;
        let timestamp_vec = &indexer.vecs.blocks.timestamp;

        match time_period {
            // Per-block, exact rewards
            TimePeriod::Day | TimePeriod::ThreeDays => {
                let rewards: Vec<Sats> = coinbase_vec.collect_range_at(start, current_height + 1);
                let timestamps: Vec<brk_types::Timestamp> =
                    timestamp_vec.collect_range_at(start, current_height + 1);

                Ok(rewards
                    .iter()
                    .zip(timestamps.iter())
                    .enumerate()
                    .map(|(i, (reward, ts))| BlockRewardsEntry {
                        avg_height: (start + i) as u32,
                        timestamp: **ts,
                        avg_rewards: **reward,
                    })
                    .collect())
            }
            // Daily averages, sampled to ~200 points
            _ => {
                let first_height_vec = &computer.indexes.day1.first_height;
                let day1_vec = &computer.indexes.height.day1;

                let start_di = day1_vec
                    .collect_one(Height::from(start))
                    .unwrap_or_default();
                let end_di = day1_vec
                    .collect_one(Height::from(current_height))
                    .unwrap_or_default();

                let total_days = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
                let step = (total_days / 200).max(1);

                let mut entries = Vec::with_capacity(total_days / step + 1);
                let mut di = start_di.to_usize();

                while di <= end_di.to_usize() {
                    let day = brk_types::Day1::from(di);
                    let next_day = brk_types::Day1::from(di + 1);

                    if let Some(first_h) = first_height_vec.collect_one(day) {
                        let next_h = first_height_vec
                            .collect_one(next_day)
                            .unwrap_or(Height::from(current_height + 1));

                        let block_count = next_h.to_usize() - first_h.to_usize();
                        if block_count > 0 {
                            let sum =
                                coinbase_vec
                                    .fold_range(first_h, next_h, Sats::ZERO, |acc, v| acc + v);
                            let avg = *sum / block_count as u64;

                            if let Some(ts) = timestamp_vec.collect_one(first_h) {
                                entries.push(BlockRewardsEntry {
                                    avg_height: first_h.to_usize() as u32,
                                    timestamp: *ts,
                                    avg_rewards: avg,
                                });
                            }
                        }
                    }

                    di += step;
                }

                Ok(entries)
            }
        }
        let bw = BlockWindow::new(self, time_period);
        let cumulative = &self.computer().mining.rewards.coinbase.cumulative.sats.height;
        Ok(bw
            .cumulative_averages(self, cumulative)
            .into_iter()
            .map(|w| BlockRewardsEntry {
                avg_height: w.avg_height,
                timestamp: w.timestamp,
                avg_rewards: w.avg_value,
                usd: w.usd,
            })
            .collect())
    }
}

@@ -1,22 +1,18 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{ReadableOptionVec, VecIndex};
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod, Weight};
use vecdb::ReadableVec;

use super::day1_iter::Day1Iter;
use super::block_window::BlockWindow;
use crate::Query;

impl Query {
    pub fn block_sizes_weights(&self, time_period: TimePeriod) -> Result<BlockSizesWeights> {
        let computer = self.computer();
        let current_height = self.height();
        let start = current_height
            .to_usize()
            .saturating_sub(time_period.block_count());
        let bw = BlockWindow::new(self, time_period);
        let timestamps = bw.timestamps(self);

        let iter = Day1Iter::new(computer, start, current_height.to_usize());

        // Rolling 24h median, sampled at day1 boundaries
        let sizes_vec = &computer
        // Batch read per-block rolling 24h medians for the range
        let all_sizes = computer
            .blocks
            .size
            .size
@@ -24,8 +20,9 @@ impl Query {
            .distribution
            .median
            ._24h
            .day1;
        let weights_vec = &computer
            .height
            .collect_range_at(bw.start, bw.end);
        let all_weights = computer
            .blocks
            .weight
            .weight
@@ -33,35 +30,30 @@ impl Query {
            .distribution
            .median
            ._24h
            .day1;
            .height
            .collect_range_at(bw.start, bw.end);

        let entries: Vec<_> = iter.collect(|di, ts, h| {
            let size: Option<u64> = sizes_vec.collect_one_flat(di).map(|s| *s);
            let weight: Option<u64> = weights_vec.collect_one_flat(di).map(|w| *w);
            Some((u32::from(h), (*ts), size, weight))
        });
        // Sample at window midpoints
        let mut sizes = Vec::with_capacity(timestamps.len());
        let mut weights = Vec::with_capacity(timestamps.len());

        let sizes = entries
            .iter()
            .filter_map(|(h, ts, size, _)| {
                size.map(|s| BlockSizeEntry {
                    avg_height: *h,
        for ((avg_height, start, _end), ts) in bw.iter().zip(&timestamps) {
            let mid = start - bw.start + (bw.window / 2).min(all_sizes.len().saturating_sub(1));
            if let Some(&size) = all_sizes.get(mid) {
                sizes.push(BlockSizeEntry {
                    avg_height,
                    timestamp: *ts,
                    avg_size: s,
                })
            })
            .collect();

        let weights = entries
            .iter()
            .filter_map(|(h, ts, _, weight)| {
                weight.map(|w| BlockWeightEntry {
                    avg_height: *h,
                    avg_size: *size,
                });
            }
            if let Some(&weight) = all_weights.get(mid) {
                weights.push(BlockWeightEntry {
                    avg_height,
                    timestamp: *ts,
                    avg_weight: w,
                })
            })
            .collect();
                    avg_weight: Weight::from(*weight),
                });
            }
        }

        Ok(BlockSizesWeights { sizes, weights })
    }

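The new path represents each window by the value at its midpoint rather than averaging over it, which is cheap once the whole range is pre-read. A standalone sketch of the midpoint rule:

// Sketch: representing each fixed-size window by its midpoint sample,
// as block_sizes_weights now does for the pre-read rolling-median series.
fn midpoint_samples(values: &[u64], window: usize) -> Vec<u64> {
    let w = window.max(1);
    let mut out = Vec::with_capacity(values.len() / w + 1);
    let mut pos = 0;
    while pos < values.len() {
        let end = (pos + w).min(values.len());
        out.push(values[(pos + end) / 2]); // midpoint is always < end, so in bounds
        pos = end;
    }
    out
}

// e.g. midpoint_samples(&[1, 2, 3, 4, 5, 6, 7], 3) == vec![2, 5, 7]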
crates/brk_query/src/impl/mining/block_window.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
use brk_types::{Cents, Dollars, Height, Sats, Timestamp, TimePeriod};
use vecdb::{ReadableVec, VecIndex};

use crate::Query;

/// Number of blocks per aggregation window, matching mempool.space's granularity.
fn block_window(period: TimePeriod) -> usize {
    match period {
        TimePeriod::Day | TimePeriod::ThreeDays | TimePeriod::Week => 1,
        TimePeriod::Month => 3,
        TimePeriod::ThreeMonths => 12,
        TimePeriod::SixMonths => 18,
        TimePeriod::Year | TimePeriod::TwoYears => 48,
        TimePeriod::ThreeYears => 72,
    }
}

/// Per-window average with metadata.
pub struct WindowAvg {
    pub avg_height: Height,
    pub timestamp: Timestamp,
    pub avg_value: Sats,
    pub usd: Dollars,
}

/// Block range and window size for a time period.
pub struct BlockWindow {
    pub start: usize,
    pub end: usize,
    pub window: usize,
}

impl BlockWindow {
    pub fn new(query: &Query, time_period: TimePeriod) -> Self {
        let current_height = query.height();
        let computer = query.computer();
        let lookback = &computer.blocks.lookback;

        // Use pre-computed timestamp-based lookback for accurate time boundaries.
        // 24h, 1w, 1m, 1y use in-memory CachedVec; others fall back to PcoVec.
        let cached = &lookback.cached_window_starts.0;
        let start_height = match time_period {
            TimePeriod::Day => cached._24h.collect_one(current_height),
            TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
            TimePeriod::Week => cached._1w.collect_one(current_height),
            TimePeriod::Month => cached._1m.collect_one(current_height),
            TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
            TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
            TimePeriod::Year => cached._1y.collect_one(current_height),
            TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
            TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
        }
        .unwrap_or_default();

        Self {
            start: start_height.to_usize(),
            end: current_height.to_usize() + 1,
            window: block_window(time_period),
        }
    }

    /// Compute per-window averages from a cumulative sats vec.
    /// Batch-reads timestamps, prices, and the cumulative in one pass.
    pub fn cumulative_averages(
        &self,
        query: &Query,
        cumulative: &impl ReadableVec<Height, Sats>,
    ) -> Vec<WindowAvg> {
        let indexer = query.indexer();
        let computer = query.computer();

        // Batch read all needed data for the range
        let all_ts = indexer
            .vecs
            .blocks
            .timestamp
            .collect_range_at(self.start, self.end);
        let all_prices: Vec<Cents> = computer
            .prices
            .spot
            .cents
            .height
            .collect_range_at(self.start, self.end);
        let read_start = self.start.saturating_sub(1).max(0);
        let all_cum = cumulative.collect_range_at(read_start, self.end);
        let offset = if self.start > 0 { 1 } else { 0 };

        let mut results = Vec::with_capacity(self.count());
        let mut pos = 0;
        let total = all_ts.len();

        while pos < total {
            let window_end = (pos + self.window).min(total);
            let block_count = (window_end - pos) as u64;
            if block_count > 0 {
                let mid = (pos + window_end) / 2;
                let cum_end = all_cum[window_end - 1 + offset];
                let cum_start = if pos + offset > 0 {
                    all_cum[pos + offset - 1]
                } else {
                    Sats::ZERO
                };
                let total_sats = cum_end - cum_start;
                results.push(WindowAvg {
                    avg_height: Height::from(self.start + mid),
                    timestamp: all_ts[mid],
                    avg_value: Sats::from(*total_sats / block_count),
                    usd: Dollars::from(all_prices[mid]),
                });
            }
            pos = window_end;
        }

        results
    }

    /// Batch-read timestamps for the midpoint of each window.
    pub fn timestamps(&self, query: &Query) -> Vec<Timestamp> {
        let all_ts = query
            .indexer()
            .vecs
            .blocks
            .timestamp
            .collect_range_at(self.start, self.end);
        let mut timestamps = Vec::with_capacity(self.count());
        let mut pos = 0;
        while pos < all_ts.len() {
            let window_end = (pos + self.window).min(all_ts.len());
            timestamps.push(all_ts[(pos + window_end) / 2]);
            pos = window_end;
        }
        timestamps
    }

    /// Number of windows in this range.
    fn count(&self) -> usize {
        (self.end - self.start + self.window - 1) / self.window
    }

    /// Iterate windows, yielding (avg_height, window_start, window_end) for each.
    pub fn iter(&self) -> impl Iterator<Item = (Height, usize, usize)> + '_ {
        let mut pos = self.start;
        std::iter::from_fn(move || {
            if pos >= self.end {
                return None;
            }
            let window_end = (pos + self.window).min(self.end);
            let avg_height = Height::from((pos + window_end) / 2);
            let start = pos;
            pos = window_end;
            Some((avg_height, start, window_end))
        })
    }
}

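
The core trick in `cumulative_averages` is that any window's sum falls out of a running cumulative series as a difference of two reads, so cost is independent of window size. A self-contained sketch of that identity:

// Sketch: per-window averages from a running cumulative sum.
// sum over [a, b) = cum[b - 1] - cum[a - 1] (with cum[-1] treated as 0).
fn window_averages(cumulative: &[u64], window: usize) -> Vec<u64> {
    let w = window.max(1);
    let mut out = Vec::new();
    let mut pos = 0;
    while pos < cumulative.len() {
        let end = (pos + w).min(cumulative.len());
        let sum_end = cumulative[end - 1];
        let sum_start = if pos > 0 { cumulative[pos - 1] } else { 0 };
        out.push((sum_end - sum_start) / (end - pos) as u64);
        pos = end;
    }
    out
}

// values [10, 20, 30, 40] -> cumulative [10, 30, 60, 100];
// window_averages(&[10, 30, 60, 100], 2) == vec![15, 35]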
@@ -1,67 +0,0 @@
use brk_computer::Computer;
use brk_types::{Day1, Height, Timestamp};
use vecdb::{ReadableVec, Ro, VecIndex};

/// Helper for iterating over day1 ranges with sampling.
pub struct Day1Iter<'a> {
    computer: &'a Computer<Ro>,
    start_di: Day1,
    end_di: Day1,
    step: usize,
}

impl<'a> Day1Iter<'a> {
    pub fn new(computer: &'a Computer<Ro>, start_height: usize, end_height: usize) -> Self {
        let start_di = computer
            .indexes
            .height
            .day1
            .collect_one(Height::from(start_height))
            .unwrap_or_default();
        let end_di = computer
            .indexes
            .height
            .day1
            .collect_one(Height::from(end_height))
            .unwrap_or_default();

        let total = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
        let step = (total / 200).max(1);

        Self {
            computer,
            start_di,
            end_di,
            step,
        }
    }

    /// Iterate and collect entries using the provided transform function.
    pub fn collect<T, F>(&self, mut transform: F) -> Vec<T>
    where
        F: FnMut(Day1, Timestamp, Height) -> Option<T>,
    {
        let total = self
            .end_di
            .to_usize()
            .saturating_sub(self.start_di.to_usize())
            + 1;
        let timestamps = &self.computer.indexes.timestamp.day1;
        let heights = &self.computer.indexes.day1.first_height;

        let mut entries = Vec::with_capacity(total / self.step + 1);
        let mut i = self.start_di.to_usize();

        while i <= self.end_di.to_usize() {
            let di = Day1::from(i);
            if let (Some(ts), Some(h)) = (timestamps.collect_one(di), heights.collect_one(di))
                && let Some(entry) = transform(di, ts, h)
            {
                entries.push(entry);
            }
            i += self.step;
        }

        entries
    }
}
@@ -85,7 +85,7 @@ impl Query {
        let time_offset = expected_time as i64 - elapsed_time as i64;

        // Calculate previous retarget using stored difficulty values
        let previous_retarget = if current_epoch_usize > 0 {
        let (previous_retarget, previous_time) = if current_epoch_usize > 0 {
            let prev_epoch = Epoch::from(current_epoch_usize - 1);
            let prev_epoch_start = computer
                .indexes
@@ -107,26 +107,33 @@ impl Query {
                .collect_one(epoch_start_height)
                .unwrap();

            if *prev_difficulty > 0.0 {
            let retarget = if *prev_difficulty > 0.0 {
                ((*curr_difficulty / *prev_difficulty) - 1.0) * 100.0
            } else {
                0.0
            }
            };

            (retarget, epoch_start_timestamp)
        } else {
            0.0
            (0.0, epoch_start_timestamp)
        };

        // Expected blocks based on wall clock time since epoch start
        let expected_blocks = elapsed_time as f64 / TARGET_BLOCK_TIME as f64;

        Ok(DifficultyAdjustment {
            progress_percent,
            difficulty_change,
            estimated_retarget_date,
            estimated_retarget_date: estimated_retarget_date * 1000,
            remaining_blocks,
            remaining_time,
            remaining_time: remaining_time * 1000,
            previous_retarget,
            previous_time,
            next_retarget_height: Height::from(next_retarget_height),
            time_avg,
            adjusted_time_avg: time_avg,
            time_avg: time_avg * 1000,
            adjusted_time_avg: time_avg * 1000,
            time_offset,
            expected_blocks,
        })
    }
}

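The `* 1000` edits are the commit's namesake: mempool.space's difficulty-adjustment endpoint reports its date and duration fields in milliseconds, while brk computed them in seconds. A unit-conversion sketch alongside the previous-retarget formula from the hunk (numbers invented for illustration):

// Sketch: seconds -> milliseconds at the API boundary, plus the
// previous-retarget percentage as computed above.
fn to_ms(seconds: i64) -> i64 {
    seconds * 1000
}

fn retarget_percent(curr_difficulty: f64, prev_difficulty: f64) -> f64 {
    if prev_difficulty > 0.0 {
        ((curr_difficulty / prev_difficulty) - 1.0) * 100.0
    } else {
        0.0
    }
}

fn main() {
    assert_eq!(to_ms(600), 600_000); // one target block interval
    // difficulty 110 after 100 -> +10% retarget
    assert!((retarget_percent(110.0, 100.0) - 10.0).abs() < 1e-9);
}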
@@ -42,7 +42,7 @@ pub fn iter_difficulty_epochs(
    let epoch_difficulty = *epoch_to_difficulty.collect_one(epoch).unwrap_or_default();

    let change_percent = match prev_difficulty {
        Some(prev) if prev > 0.0 => ((epoch_difficulty / prev) - 1.0) * 100.0,
        Some(prev) if prev > 0.0 => epoch_difficulty / prev,
        _ => 0.0,
    };

@@ -79,9 +79,10 @@ impl Query {
        let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
            .into_iter()
            .map(|e| DifficultyEntry {
                timestamp: e.timestamp,
                difficulty: e.difficulty,
                time: e.timestamp,
                height: e.height,
                difficulty: e.difficulty,
                adjustment: e.change_percent,
            })
            .collect();

@@ -2,7 +2,7 @@ mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes;
mod day1_iter;
mod block_window;
mod difficulty;
mod difficulty_adjustments;
mod epochs;
@@ -1,17 +1,30 @@
use brk_error::{Error, Result};
use brk_types::{
    BlockInfoV1, Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo,
    PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, TimePeriod, pools,
    BlockInfoV1, Day1, Height, Pool, PoolBlockCounts, PoolBlockShares, PoolDetail,
    PoolDetailInfo, PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, StoredF64,
    StoredU64, TimePeriod, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};

use crate::Query;

/// 7-day lookback for share computation (matching mempool.space)
const LOOKBACK_DAYS: usize = 7;
/// Weekly sample interval (matching mempool.space's 604800s interval)
const SAMPLE_WEEKLY: usize = 7;

/// Pre-read shared data for hashrate computation.
struct HashrateSharedData {
    start_day: usize,
    end_day: usize,
    daily_hashrate: Vec<Option<StoredF64>>,
    first_heights: Vec<Height>,
}

impl Query {
    pub fn mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
        let computer = self.computer();
        let current_height = self.height();
        let end = current_height.to_usize();

        // No blocks indexed yet
        if computer.pools.pool.len() == 0 {
@@ -19,14 +32,29 @@ impl Query {
                pools: vec![],
                block_count: 0,
                last_estimated_hashrate: 0,
                last_estimated_hashrate3d: 0,
                last_estimated_hashrate1w: 0,
            });
        }

        // Calculate start height based on time period
        let start = end.saturating_sub(time_period.block_count());
        // Use timestamp-based lookback for accurate time boundaries
        let lookback = &computer.blocks.lookback;
        let start = match time_period {
            TimePeriod::Day => lookback.cached_window_starts.0._24h.collect_one(current_height),
            TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
            TimePeriod::Week => lookback.cached_window_starts.0._1w.collect_one(current_height),
            TimePeriod::Month => lookback.cached_window_starts.0._1m.collect_one(current_height),
            TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
            TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
            TimePeriod::Year => lookback.cached_window_starts.0._1y.collect_one(current_height),
            TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
            TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
        }
        .unwrap_or_default()
        .to_usize();

        let pools = pools();
        let mut pool_data: Vec<(&'static brk_types::Pool, u64)> = Vec::new();
        let mut pool_data: Vec<(&'static Pool, u64)> = Vec::new();

        // For each pool, get cumulative count at end and start, subtract to get range count
        for (pool_id, cumulative) in computer
@@ -78,13 +106,33 @@ impl Query {
            })
            .collect();

        // TODO: Calculate actual hashrate from difficulty
        let last_estimated_hashrate = 0u128;
        let hashrate_at = |height: Height| -> u128 {
            let day = computer.indexes.height.day1.collect_one(height).unwrap_or_default();
            computer
                .mining
                .hashrate
                .rate
                .base
                .day1
                .collect_one(day)
                .flatten()
                .map(|v| *v as u128)
                .unwrap_or(0)
        };

        let lookback = &computer.blocks.lookback;
        let last_estimated_hashrate = hashrate_at(current_height);
        let last_estimated_hashrate3d =
            hashrate_at(lookback._3d.collect_one(current_height).unwrap_or_default());
        let last_estimated_hashrate1w =
            hashrate_at(lookback._1w.collect_one(current_height).unwrap_or_default());

        Ok(PoolsSummary {
            pools: pool_stats,
            block_count: total_blocks,
            last_estimated_hashrate,
            last_estimated_hashrate3d,
            last_estimated_hashrate1w,
        })
    }

@@ -118,8 +166,15 @@ impl Query {
        // Get total blocks (all time)
        let total_all: u64 = *cumulative.collect_one(current_height).unwrap_or_default();

        // Get blocks for 24h (144 blocks)
        let start_24h = end.saturating_sub(144);
        // Use timestamp-based lookback for accurate time boundaries
        let lookback = &computer.blocks.lookback;
        let start_24h = lookback
            .cached_window_starts
            .0
            ._24h
            .collect_one(current_height)
            .unwrap_or_default()
            .to_usize();
        let count_before_24h: u64 = if start_24h == 0 {
            0
        } else {
@@ -129,8 +184,13 @@ impl Query {
        };
        let total_24h = total_all.saturating_sub(count_before_24h);

        // Get blocks for 1w (1008 blocks)
        let start_1w = end.saturating_sub(1008);
        let start_1w = lookback
            .cached_window_starts
            .0
            ._1w
            .collect_one(current_height)
            .unwrap_or_default()
            .to_usize();
        let count_before_1w: u64 = if start_1w == 0 {
            0
        } else {
@@ -191,11 +251,12 @@ impl Query {
        let reader = computer.pools.pool.reader();
        let end = start.min(reader.len().saturating_sub(1));

        let mut heights = Vec::with_capacity(10);
        const POOL_BLOCKS_LIMIT: usize = 100;
        let mut heights = Vec::with_capacity(POOL_BLOCKS_LIMIT);
        for h in (0..=end).rev() {
            if reader.get(h) == slug {
                heights.push(h);
                if heights.len() >= 10 {
                if heights.len() >= POOL_BLOCKS_LIMIT {
                    break;
                }
            }
@@ -211,98 +272,166 @@ impl Query {
    }

    pub fn pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
        let pools_list = pools();
        let pool = pools_list.get(slug);
        let entries = self.compute_pool_hashrate_entries(slug, 0)?;
        Ok(entries
            .into_iter()
            .map(|(ts, hr, share)| PoolHashrateEntry {
                timestamp: ts,
                avg_hashrate: hr,
                share,
                pool_name: pool.name.to_string(),
            })
            .collect())
        let pool_name = pools().get(slug).name.to_string();
        let shared = self.hashrate_shared_data(0)?;
        let pool_cum = self.pool_daily_cumulative(slug, shared.start_day, shared.end_day)?;
        Ok(Self::compute_hashrate_entries(
            &shared, &pool_cum, &pool_name, SAMPLE_WEEKLY,
        ))
    }

    pub fn pools_hashrate(
        &self,
        time_period: Option<TimePeriod>,
    ) -> Result<Vec<PoolHashrateEntry>> {
        let current_height = self.height().to_usize();
        let start = match time_period {
            Some(tp) => current_height.saturating_sub(tp.block_count()),
        let start_height = match time_period {
            Some(tp) => {
                let lookback = &self.computer().blocks.lookback;
                let current_height = self.height();
                match tp {
                    TimePeriod::Day => lookback.cached_window_starts.0._24h.collect_one(current_height),
                    TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
                    TimePeriod::Week => lookback.cached_window_starts.0._1w.collect_one(current_height),
                    TimePeriod::Month => lookback.cached_window_starts.0._1m.collect_one(current_height),
                    TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
                    TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
                    TimePeriod::Year => lookback.cached_window_starts.0._1y.collect_one(current_height),
                    TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
                    TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
                }
                .unwrap_or_default()
                .to_usize()
            }
            None => 0,
        };

        let shared = self.hashrate_shared_data(start_height)?;
        let pools_list = pools();
        let mut entries = Vec::new();

        for pool in pools_list.iter() {
            if let Ok(pool_entries) = self.compute_pool_hashrate_entries(pool.slug, start) {
                for (ts, hr, share) in pool_entries {
                    if share > 0.0 {
                        entries.push(PoolHashrateEntry {
                            timestamp: ts,
                            avg_hashrate: hr,
                            share,
                            pool_name: pool.name.to_string(),
                        });
                    }
                }
            }
            let Ok(pool_cum) =
                self.pool_daily_cumulative(pool.slug, shared.start_day, shared.end_day)
            else {
                continue;
            };
            entries.extend(Self::compute_hashrate_entries(
                &shared,
                &pool_cum,
                &pool.name,
                SAMPLE_WEEKLY,
            ));
        }

        Ok(entries)
    }

    /// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
    fn compute_pool_hashrate_entries(
    /// Shared data needed for hashrate computation (read once, reuse across pools).
    fn hashrate_shared_data(&self, start_height: usize) -> Result<HashrateSharedData> {
        let computer = self.computer();
        let current_height = self.height();
        let start_day = computer
            .indexes
            .height
            .day1
            .collect_one_at(start_height)
            .unwrap_or_default()
            .to_usize();
        let end_day = computer
            .indexes
            .height
            .day1
            .collect_one(current_height)
            .unwrap_or_default()
            .to_usize()
            + 1;
        let daily_hashrate = computer
            .mining
            .hashrate
            .rate
            .base
            .day1
            .collect_range_at(start_day, end_day);
        let first_heights = computer
            .indexes
            .day1
            .first_height
            .collect_range_at(start_day, end_day);

        Ok(HashrateSharedData {
            start_day,
            end_day,
            daily_hashrate,
            first_heights,
        })
    }

    /// Read daily cumulative blocks mined for a pool.
    fn pool_daily_cumulative(
        &self,
        slug: PoolSlug,
        start_height: usize,
    ) -> Result<Vec<(brk_types::Timestamp, u128, f64)>> {
        start_day: usize,
        end_day: usize,
    ) -> Result<Vec<Option<StoredU64>>> {
        let computer = self.computer();
        let indexer = self.indexer();
        let end = self.height().to_usize() + 1;
        let start = start_height;

        let dominance_bps = computer
        computer
            .pools
            .major
            .get(&slug)
            .map(|v| &v.base.dominance.bps.height)
            .map(|v| v.base.blocks_mined.cumulative.day1.collect_range_at(start_day, end_day))
            .or_else(|| {
                computer
                    .pools
                    .minor
                    .get(&slug)
                    .map(|v| &v.dominance.bps.height)
                    .map(|v| v.blocks_mined.cumulative.day1.collect_range_at(start_day, end_day))
            })
            .ok_or_else(|| Error::NotFound("Pool not found".into()))?;
            .ok_or_else(|| Error::NotFound("Pool not found".into()))
    }

        let total = end - start;
        let step = (total / 200).max(1);
    /// Compute hashrate entries from daily cumulative blocks + shared data.
    /// Uses 7-day windowed share: pool_blocks_in_week / total_blocks_in_week.
    fn compute_hashrate_entries(
        shared: &HashrateSharedData,
        pool_cum: &[Option<StoredU64>],
        pool_name: &str,
        sample_days: usize,
    ) -> Vec<PoolHashrateEntry> {
        let total = pool_cum.len();
        if total <= LOOKBACK_DAYS {
            return vec![];
        }

        // Batch read everything for the range
        let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(start, end);
        let bps_values = dominance_bps.collect_range_at(start, end);
        let day1_values = computer.indexes.height.day1.collect_range_at(start, end);
        let hashrate_vec = &computer.mining.hashrate.rate.base.day1;
        let mut entries = Vec::new();
        let mut i = LOOKBACK_DAYS;
        while i < total {
            if let (Some(cum_now), Some(cum_prev)) =
                (pool_cum[i], pool_cum[i - LOOKBACK_DAYS])
            {
                let pool_blocks = (*cum_now).saturating_sub(*cum_prev);
                if pool_blocks > 0 {
                    let h_now = shared.first_heights[i].to_usize();
                    let h_prev = shared.first_heights[i - LOOKBACK_DAYS].to_usize();
                    let total_blocks = h_now.saturating_sub(h_prev);

        // Pre-read all needed hashrates by collecting unique day1 values
        let max_day = day1_values.iter().map(|d| d.to_usize()).max().unwrap_or(0);
        let min_day = day1_values.iter().map(|d| d.to_usize()).min().unwrap_or(0);
        let hashrates = hashrate_vec.collect_range_dyn(min_day, max_day + 1);
                    if total_blocks > 0 {
                        if let Some(hr) = shared.daily_hashrate[i].as_ref() {
                            let network_hr = f64::from(**hr);
                            let share = pool_blocks as f64 / total_blocks as f64;
                            let day = Day1::from(shared.start_day + i);
                            entries.push(PoolHashrateEntry {
                                timestamp: day.to_timestamp(),
                                avg_hashrate: (network_hr * share) as u128,
                                share,
                                pool_name: pool_name.to_string(),
                            });
                        }
                    }
                }
            }
            i += sample_days;
        }

        Ok((0..total)
            .step_by(step)
            .filter_map(|i| {
                let bps = *bps_values[i];
                let share = bps as f64 / 10000.0;
                let day_idx = day1_values[i].to_usize() - min_day;
                let network_hr = f64::from(*hashrates.get(day_idx)?.as_ref()?);
                Some((timestamps[i], (network_hr * share) as u128, share))
            })
            .collect())
        entries
    }
}

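
The new share metric is a trailing 7-day window: blocks the pool mined in the last week over all blocks mined in that week, with both quantities read as differences of running totals. The diff derives the denominator from day-boundary heights; this hedged sketch substitutes a second cumulative series for brevity:

// Sketch: 7-day windowed share from cumulative counters, as in
// compute_hashrate_entries. A week's block count is two reads apart.
fn weekly_shares(
    pool_cum: &[u64],  // pool's cumulative blocks, one entry per day
    total_cum: &[u64], // chain-wide cumulative blocks, same indexing
    lookback: usize,   // 7 for mempool.space parity
) -> Vec<f64> {
    (lookback..pool_cum.len().min(total_cum.len()))
        .map(|i| {
            let pool = pool_cum[i] - pool_cum[i - lookback];
            let total = total_cum[i] - total_cum[i - lookback];
            if total > 0 { pool as f64 / total as f64 } else { 0.0 }
        })
        .collect()
}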
@@ -1,6 +1,6 @@
use brk_error::Result;
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Timestamp};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Hour4, Timestamp};
use vecdb::ReadableVec;

use crate::Query;

@@ -21,38 +21,41 @@ impl Query {
    }

    pub fn historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
        let indexer = self.indexer();
        let computer = self.computer();
        let max_height = self.height().to_usize();
        let end = max_height + 1;

        let timestamps = indexer.vecs.blocks.timestamp.collect();
        let all_prices = computer.prices.spot.cents.height.collect();

        let prices = if let Some(target_ts) = timestamp {
            let target = usize::from(target_ts);
            let h = timestamps
                .binary_search_by_key(&target, |t| usize::from(*t))
                .unwrap_or_else(|i| i.min(max_height));

            vec![HistoricalPriceEntry {
                time: usize::from(timestamps[h]) as u64,
                usd: Dollars::from(all_prices[h]),
            }]
        } else {
            let step = (max_height / 200).max(1);
            (0..end)
                .step_by(step)
                .map(|h| HistoricalPriceEntry {
                    time: usize::from(timestamps[h]) as u64,
                    usd: Dollars::from(all_prices[h]),
                })
                .collect()
        let prices = match timestamp {
            Some(ts) => self.price_at(ts)?,
            None => self.all_prices()?,
        };

        Ok(HistoricalPrice {
            prices,
            exchange_rates: ExchangeRates {},
        })
    }

    fn price_at(&self, target: Timestamp) -> Result<Vec<HistoricalPriceEntry>> {
        let h4 = Hour4::from_timestamp(target);
        let cents = self.computer().prices.spot.cents.hour4.collect_one(h4);
        Ok(vec![HistoricalPriceEntry {
            time: usize::from(h4.to_timestamp()) as u64,
            usd: Dollars::from(cents.flatten().unwrap_or_default()),
        }])
    }

    fn all_prices(&self) -> Result<Vec<HistoricalPriceEntry>> {
        let computer = self.computer();
        Ok(computer
            .prices
            .spot
            .cents
            .hour4
            .collect()
            .into_iter()
            .enumerate()
            .filter_map(|(i, cents)| {
                Some(HistoricalPriceEntry {
                    time: usize::from(Hour4::from(i).to_timestamp()) as u64,
                    usd: Dollars::from(cents?),
                })
            })
            .collect())
    }
}

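
The rewrite trades a binary search over every block timestamp for fixed 4-hour price buckets: a timestamp maps to its bucket by integer division, and the bucket maps back to a representative time by multiplication. A sketch of that mapping, assuming buckets count from the Unix epoch (brk's `Hour4` may anchor differently):

// Sketch: fixed 4-hour buckets replacing per-height binary search.
const HOUR4_SECS: u64 = 4 * 60 * 60;

fn bucket_of(unix_secs: u64) -> u64 {
    unix_secs / HOUR4_SECS
}

fn bucket_start(bucket: u64) -> u64 {
    bucket * HOUR4_SECS
}

fn main() {
    let ts = 1_700_000_000;
    let b = bucket_of(ts);
    assert!(bucket_start(b) <= ts && ts < bucket_start(b + 1));
}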
@@ -1,10 +1,8 @@
|
||||
use std::io::Cursor;
|
||||
|
||||
use bitcoin::{consensus::Decodable, hex::DisplayHex};
|
||||
use bitcoin::hex::DisplayHex;
|
||||
use brk_error::{Error, Result};
|
||||
use brk_types::{
|
||||
Height, MerkleProof, OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut,
|
||||
TxOutspend, TxStatus, Txid, TxidParam, TxidPrefix, Vin, Vout, Weight,
|
||||
BlockHash, Height, MerkleProof, Timestamp, TxInIndex, TxIndex, TxOutspend, TxStatus,
|
||||
Transaction, Txid, TxidParam, TxidPrefix, Vin, Vout,
|
||||
};
|
||||
use vecdb::{ReadableVec, VecIndex};
|
||||
|
||||
@@ -109,43 +107,11 @@ impl Query {
|
||||
self.transaction_hex_by_index(tx_index)
|
||||
}
|
||||
|
||||
pub fn outspend(&self, TxidParam { txid }: TxidParam, vout: Vout) -> Result<TxOutspend> {
|
||||
// Mempool outputs are unspent in on-chain terms
|
||||
if let Some(mempool) = self.mempool()
|
||||
&& mempool.get_txs().contains_key(&txid)
|
||||
{
|
||||
return Ok(TxOutspend::UNSPENT);
|
||||
}
|
||||
|
||||
// Look up confirmed transaction
|
||||
let prefix = TxidPrefix::from(&txid);
|
||||
let indexer = self.indexer();
|
||||
let Ok(Some(tx_index)) = indexer
|
||||
.stores
|
||||
.txid_prefix_to_tx_index
|
||||
.get(&prefix)
|
||||
.map(|opt| opt.map(|cow| cow.into_owned()))
|
||||
else {
|
||||
return Err(Error::UnknownTxid);
|
||||
};
|
||||
|
||||
// Calculate txout_index
|
||||
let first_txout_index = indexer
|
||||
.vecs
|
||||
.transactions
|
||||
.first_txout_index
|
||||
.read_once(tx_index)?;
|
||||
let txout_index = first_txout_index + vout;
|
||||
|
||||
// Look up spend status
|
||||
let computer = self.computer();
|
||||
let txin_index = computer.outputs.spent.txin_index.read_once(txout_index)?;
|
||||
|
||||
if txin_index == TxInIndex::UNSPENT {
|
||||
return Ok(TxOutspend::UNSPENT);
|
||||
}
|
||||
|
||||
self.outspend_details(txin_index)
|
||||
pub fn outspend(&self, txid: TxidParam, vout: Vout) -> Result<TxOutspend> {
|
||||
let all = self.outspends(txid)?;
|
||||
all.into_iter()
|
||||
.nth(usize::from(vout))
|
||||
.ok_or(Error::OutOfRange("Output index out of range".into()))
|
||||
}

pub fn outspends(&self, TxidParam { txid }: TxidParam) -> Result<Vec<TxOutspend>> {
@@ -185,6 +151,16 @@ impl Query {
// Get spend status for each output
let computer = self.computer();
let txin_index_reader = computer.outputs.spent.txin_index.reader();
let txid_reader = indexer.vecs.transactions.txid.reader();

// Cursors for PcoVec reads — buffer chunks so nearby indices share decompression
let mut input_tx_cursor = indexer.vecs.inputs.tx_index.cursor();
let mut first_txin_cursor = indexer.vecs.transactions.first_txin_index.cursor();
let mut height_cursor = indexer.vecs.transactions.height.cursor();
let mut block_ts_cursor = indexer.vecs.blocks.timestamp.cursor();

// Block info cache — spending txs in the same block share block hash/time
let mut cached_block: Option<(Height, BlockHash, Timestamp)> = None;

let mut outspends = Vec::with_capacity(output_count);
for i in 0..output_count {
@@ -193,9 +169,38 @@ impl Query {

if txin_index == TxInIndex::UNSPENT {
outspends.push(TxOutspend::UNSPENT);
} else {
outspends.push(self.outspend_details(txin_index)?);
continue;
}

let spending_tx_index = input_tx_cursor.get(usize::from(txin_index)).unwrap();
let spending_first_txin_index =
first_txin_cursor.get(spending_tx_index.to_usize()).unwrap();
let vin =
Vin::from(usize::from(txin_index) - usize::from(spending_first_txin_index));
let spending_txid = txid_reader.get(spending_tx_index.to_usize());
let spending_height = height_cursor.get(spending_tx_index.to_usize()).unwrap();

let (block_hash, block_time) =
if let Some((h, ref bh, bt)) = cached_block && h == spending_height {
(bh.clone(), bt)
} else {
let bh = indexer.vecs.blocks.blockhash.read_once(spending_height)?;
let bt = block_ts_cursor.get(spending_height.to_usize()).unwrap();
cached_block = Some((spending_height, bh.clone(), bt));
(bh, bt)
};

outspends.push(TxOutspend {
spent: true,
txid: Some(spending_txid),
vin: Some(vin),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
block_hash: Some(block_hash),
block_time: Some(block_time),
}),
});
}

Ok(outspends)
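
// A minimal, std-only sketch of the one-entry block cache used above
// (`lookup_block` is a hypothetical stand-in for the blockhash and
// timestamp reads): spends of adjacent outputs often land in the same
// block, so remembering the last (height, hash, time) triple skips
// most repeated lookups while the cursors keep the PcoVec chunks warm.
fn lookup_block(height: u32) -> (String, u64) {
    (format!("hash-{height}"), u64::from(height) * 600) // made-up values
}

fn block_infos(heights: &[u32]) -> Vec<(String, u64)> {
    let mut cached: Option<(u32, String, u64)> = None;
    let mut out = Vec::with_capacity(heights.len());
    for &h in heights {
        let hit = matches!(&cached, Some((ch, _, _)) if *ch == h);
        if !hit {
            let (hash, time) = lookup_block(h); // the "expensive" read
            cached = Some((h, hash, time));
        }
        let (_, hash, time) = cached.as_ref().unwrap();
        out.push((hash.clone(), *time));
    }
    out
}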
@@ -204,155 +209,10 @@ impl Query {
// === Helper methods ===

pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
let indexer = self.indexer();
let reader = self.reader();

// Get tx metadata using collect_one for PcoVec, read_once for BytesVec
let txid = indexer.vecs.transactions.txid.read_once(tx_index)?;
let height = indexer
.vecs
.transactions
.height
.collect_one(tx_index)
.unwrap();
let version = indexer
.vecs
.transactions
.tx_version
.collect_one(tx_index)
.unwrap();
let lock_time = indexer
.vecs
.transactions
.raw_locktime
.collect_one(tx_index)
.unwrap();
let total_size = indexer
.vecs
.transactions
.total_size
.collect_one(tx_index)
.unwrap();
let first_txin_index = indexer
.vecs
.transactions
.first_txin_index
.collect_one(tx_index)
.unwrap();
let position = indexer
.vecs
.transactions
.position
.collect_one(tx_index)
.unwrap();

// Get block info for status
let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?;
let block_time = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();

// Read and decode the raw transaction from blk file
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
let mut cursor = Cursor::new(buffer);
let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
.map_err(|_| Error::Parse("Failed to decode transaction".into()))?;

// Create readers for random access lookups
let txid_reader = indexer.vecs.transactions.txid.reader();
let first_txout_index_reader = indexer.vecs.transactions.first_txout_index.reader();
let value_reader = indexer.vecs.outputs.value.reader();
let output_type_reader = indexer.vecs.outputs.output_type.reader();
let type_index_reader = indexer.vecs.outputs.type_index.reader();
let addr_readers = indexer.vecs.addrs.addr_readers();

// Batch-read outpoints for all inputs (avoids per-input PcoVec page decompression)
let outpoints: Vec<_> = indexer.vecs.inputs.outpoint.collect_range_at(
usize::from(first_txin_index),
usize::from(first_txin_index) + tx.input.len(),
);

// Build inputs with prevout information
let input: Vec<TxIn> = tx
.input
.iter()
.enumerate()
.map(|(i, txin)| {
let outpoint = outpoints[i];

let is_coinbase = outpoint.is_coinbase();

// Get prevout info if not coinbase
let (prev_txid, prev_vout, prevout) = if is_coinbase {
(Txid::COINBASE, Vout::MAX, None)
} else {
let prev_tx_index = outpoint.tx_index();
let prev_vout = outpoint.vout();
let prev_txid = txid_reader.get(prev_tx_index.to_usize());

// Calculate the txout_index for the prevout
let prev_first_txout_index =
first_txout_index_reader.get(prev_tx_index.to_usize());
let prev_txout_index = prev_first_txout_index + prev_vout;

let prev_value = value_reader.get(usize::from(prev_txout_index));
let prev_output_type: OutputType =
output_type_reader.get(usize::from(prev_txout_index));
let prev_type_index = type_index_reader.get(usize::from(prev_txout_index));
let script_pubkey =
addr_readers.script_pubkey(prev_output_type, prev_type_index);

let prevout = Some(TxOut::from((script_pubkey, prev_value)));

(prev_txid, prev_vout, prevout)
};

TxIn {
txid: prev_txid,
vout: prev_vout,
prevout,
script_sig: txin.script_sig.clone(),
script_sig_asm: (),
is_coinbase,
sequence: txin.sequence.0,
inner_redeem_script_asm: (),
}
})
.collect();

// Calculate weight before consuming tx.output
let weight = Weight::from(tx.weight());

// Calculate sigop cost
let total_sigop_cost = tx.total_sigop_cost(|_| None);

// Build outputs
let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();

// Build status
let status = TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
};

let mut transaction = Transaction {
index: Some(tx_index),
txid,
version,
lock_time,
total_size: *total_size as usize,
weight,
total_sigop_cost,
fee: Sats::ZERO, // Will be computed below
input,
output,
status,
};

// Compute fee from inputs - outputs
transaction.compute_fee();

Ok(transaction)
self.transactions_by_range(tx_index.to_usize(), 1)?
.into_iter()
.next()
.ok_or(Error::NotFound("Transaction not found".into()))
}
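
// A rough sketch of why the removed body batched its outpoint reads
// (`CHUNK` and `decompress_chunk` are illustrative; the real PcoVec
// chunking details may differ): n point reads into a chunk-compressed
// vector can decompress the same chunk up to n times, while one range
// read decodes each covered chunk exactly once. The fee the old body
// derived at the end is simply the sum of prevout values minus the
// sum of output values.
const CHUNK: usize = 1024;

fn decompress_chunk(chunk: usize) -> Vec<u64> {
    (0..CHUNK).map(|i| (chunk * CHUNK + i) as u64).collect() // fake data
}

fn collect_range(start: usize, end: usize) -> Vec<u64> {
    let mut out = Vec::with_capacity(end - start);
    let mut i = start;
    while i < end {
        let chunk = i / CHUNK;
        let data = decompress_chunk(chunk); // once per chunk, not per item
        let upto = ((chunk + 1) * CHUNK).min(end);
        out.extend_from_slice(&data[i - chunk * CHUNK..upto - chunk * CHUNK]);
        i = upto;
    }
    out
}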

fn transaction_raw_by_index(&self, tx_index: TxIndex) -> Result<Vec<u8>> {
@@ -366,60 +226,7 @@ impl Query {
Ok(self.transaction_raw_by_index(tx_index)?.to_lower_hex_string())
}

fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
let indexer = self.indexer();

// Look up spending tx_index directly
let spending_tx_index = indexer
.vecs
.inputs
.tx_index
.collect_one(txin_index)
.unwrap();

// Calculate vin
let spending_first_txin_index = indexer
.vecs
.transactions
.first_txin_index
.collect_one(spending_tx_index)
.unwrap();
let vin = Vin::from(usize::from(txin_index) - usize::from(spending_first_txin_index));

// Get spending tx details
let spending_txid = indexer
.vecs
.transactions
.txid
.read_once(spending_tx_index)?;
let spending_height = indexer
.vecs
.transactions
.height
.collect_one(spending_tx_index)
.unwrap();
let block_hash = indexer.vecs.blocks.blockhash.read_once(spending_height)?;
let block_time = indexer
.vecs
.blocks
.timestamp
.collect_one(spending_height)
.unwrap();

Ok(TxOutspend {
spent: true,
txid: Some(spending_txid),
vin: Some(vin),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
block_hash: Some(block_hash),
block_time: Some(block_time),
}),
})
}
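
// The vin arithmetic above in isolation: all inputs live in one flat
// global array and each tx records where its inputs begin, so an
// input's position within its spending tx is a single subtraction
// (the numbers below are made up for illustration).
fn vin(txin_index: usize, first_txin_index: usize) -> usize {
    txin_index - first_txin_index
}

// A tx whose inputs occupy global slots 700..705: its third input has
// txin_index 702, so vin(702, 700) == 2.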

fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
pub fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
let indexer = self.indexer();
let prefix = TxidPrefix::from(txid);
let tx_index: TxIndex = indexer

@@ -8,7 +8,7 @@ use brk_indexer::Indexer;
use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_rpc::Client;
use brk_types::{Height, SyncStatus};
use brk_types::{BlockHash, BlockHashPrefix, Height, SyncStatus};
use vecdb::{AnyVec, ReadOnlyClone, ReadableVec, Ro};

#[cfg(feature = "tokio")]
@@ -72,6 +72,16 @@ impl Query {
self.indexed_height().min(self.computed_height())
}

/// Tip block hash, cached in the indexer.
pub fn tip_blockhash(&self) -> BlockHash {
self.indexer().tip_blockhash()
}

/// Tip block hash prefix for cache etags.
pub fn tip_hash_prefix(&self) -> BlockHashPrefix {
BlockHashPrefix::from(&self.tip_blockhash())
}
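
// A sketch of how an HTTP layer might use the prefix as a cache
// validator (the handler shape and the 8-byte width are assumptions,
// not taken from this diff): the prefix changes whenever the tip
// block changes, so it works as a cheap ETag for chain-dependent
// responses.
fn etag(tip_hash_prefix: [u8; 8]) -> String {
    tip_hash_prefix.iter().map(|b| format!("{b:02x}")).collect()
}

fn not_modified(if_none_match: Option<&str>, current: &str) -> bool {
    if_none_match == Some(current) // if true, reply 304 with no body
}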

/// Build sync status with the given tip height
pub fn sync_status(&self, tip_height: Height) -> SyncStatus {
let indexed_height = self.indexed_height();