global: speed improvements, part 3

This commit is contained in:
nym21
2026-04-09 14:58:25 +02:00
parent 5a3e1b4e6e
commit db5b3887f9
4 changed files with 186 additions and 295 deletions

View File

@@ -39,9 +39,13 @@ impl Query {
let addr_type = output_type;
let hash = AddrHash::from(&bytes);
let Ok(Some(type_index)) = stores
let Some(store) = stores
.addr_type_to_addr_hash_to_addr_index
.get_unwrap(addr_type)
.get(addr_type)
else {
return Err(Error::InvalidAddr);
};
let Ok(Some(type_index)) = store
.get(&hash)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
@@ -139,12 +143,7 @@ impl Query {
.data()?;
if let Some(after_txid) = after_txid {
let after_tx_index = stores
.txid_prefix_to_tx_index
.get(&after_txid.into())
.map_err(|_| Error::UnknownTxid)?
.ok_or(Error::UnknownTxid)?
.into_owned();
let after_tx_index = self.resolve_tx_index(&after_txid)?;
// Seek directly to after_tx_index and iterate backward — O(limit)
let min = AddrIndexTxIndex::min_for_addr(type_index);
@@ -189,28 +188,30 @@ impl Query {
let txid_reader = vecs.transactions.txid.reader();
let first_txout_index_reader = vecs.transactions.first_txout_index.reader();
let value_reader = vecs.outputs.value.reader();
let tx_heights = &self.computer().indexes.tx_heights;
let mut cached_status: Option<(Height, TxStatus)> = None;
let mut utxos = Vec::with_capacity(outpoints.len());
for (tx_index, vout) in outpoints {
let txid = txid_reader.get(tx_index.to_usize());
let height: Height = tx_heights.get_shared(tx_index).data()?;
let first_txout_index = first_txout_index_reader.get(tx_index.to_usize());
let value = value_reader.get(usize::from(first_txout_index + vout));
let block_hash = vecs.blocks.cached_blockhash.collect_one(height).data()?;
let block_time = vecs.blocks.cached_timestamp.collect_one(height).data()?;
let height = self.confirmed_status_height(tx_index)?;
let status = if let Some((h, ref s)) = cached_status
&& h == height
{
s.clone()
} else {
let s = self.confirmed_status_at(height)?;
cached_status = Some((height, s.clone()));
s
};
utxos.push(Utxo {
txid,
vout,
status: TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
},
status,
value,
});
}

View File

@@ -3,7 +3,7 @@ use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, OptionData, Result};
use brk_types::{
BlkPosition, BlockHash, Height, OutPoint, OutputType, RawLockTime, Sats, StoredU32, Timestamp,
BlkPosition, BlockHash, Height, OutPoint, OutputType, RawLockTime, Sats, StoredU32,
Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxStatus, Txid, TypeIndex, Vout, Weight,
};
use rustc_hash::FxHashMap;
@@ -75,7 +75,6 @@ impl Query {
return Ok(Vec::new());
}
let t0 = std::time::Instant::now();
let len = indices.len();
// Sort positions ascending for sequential I/O (O(n) when already sorted)
@@ -87,21 +86,19 @@ impl Query {
// ── Phase 1: Decode all transactions, collect outpoints ─────────
let tx_heights = &self.computer().indexes.tx_heights;
let mut txid_cursor = indexer.vecs.transactions.txid.cursor();
let mut locktime_cursor = indexer.vecs.transactions.raw_locktime.cursor();
let mut total_size_cursor = indexer.vecs.transactions.total_size.cursor();
let mut first_txin_cursor = indexer.vecs.transactions.first_txin_index.cursor();
let mut position_cursor = indexer.vecs.transactions.position.cursor();
struct DecodedTx {
pos: usize,
tx_index: TxIndex,
txid: Txid,
height: Height,
lock_time: RawLockTime,
total_size: StoredU32,
block_hash: BlockHash,
block_time: Timestamp,
status: TxStatus,
decoded: bitcoin::Transaction,
first_txin_index: TxInIndex,
outpoints: Vec<OutPoint>,
@@ -109,6 +106,7 @@ impl Query {
let mut decoded_txs: Vec<DecodedTx> = Vec::with_capacity(len);
let mut total_inputs: usize = 0;
let mut cached_status: Option<(Height, TxStatus)> = None;
// Phase 1a: Read metadata + decode transactions (no outpoint reads yet)
for &pos in &order {
@@ -116,24 +114,21 @@ impl Query {
let idx = tx_index.to_usize();
let txid: Txid = txid_cursor.get(idx).data()?;
let height: Height = tx_heights.get_shared(tx_index).data()?;
let lock_time: RawLockTime = locktime_cursor.get(idx).data()?;
let total_size: StoredU32 = total_size_cursor.get(idx).data()?;
let first_txin_index: TxInIndex = first_txin_cursor.get(idx).data()?;
let position: BlkPosition = position_cursor.get(idx).data()?;
let block_hash = indexer
.vecs
.blocks
.cached_blockhash
.collect_one(height)
.data()?;
let block_time = indexer
.vecs
.blocks
.cached_timestamp
.collect_one(height)
.data()?;
let height = self.confirmed_status_height(tx_index)?;
let status = if let Some((h, ref s)) = cached_status
&& h == height
{
s.clone()
} else {
let s = self.confirmed_status_at(height)?;
cached_status = Some((height, s.clone()));
s
};
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
let decoded = bitcoin::Transaction::consensus_decode(&mut Cursor::new(buffer))
@@ -145,19 +140,15 @@ impl Query {
pos,
tx_index,
txid,
height,
lock_time,
total_size,
block_hash,
block_time,
status,
decoded,
first_txin_index,
outpoints: Vec::new(),
});
}
let t_phase1a = t0.elapsed();
// Phase 1b: Batch-read outpoints + prevout data via cursors (PcoVec —
// sequential cursor avoids re-decompressing the same pages).
// Reading output_type/type_index/value HERE from inputs vecs (sequential)
@@ -187,41 +178,18 @@ impl Query {
dtx.outpoints = outpoints;
}
let t_phase1b = t0.elapsed();
// ── Phase 2: Batch-read prevout data in sorted order ────────────
// Collect all non-coinbase outpoints, deduplicate, sort by tx_index
let mut prevout_keys: Vec<OutPoint> = Vec::with_capacity(total_inputs);
for dtx in &decoded_txs {
for &op in &dtx.outpoints {
if op.is_not_coinbase() {
prevout_keys.push(op);
}
}
}
prevout_keys.sort_unstable();
prevout_keys.dedup();
// Batch-read txid sorted by prev_tx_index (only remaining random read)
let txid_reader = indexer.vecs.transactions.txid.reader();
// ── Phase 2: Build prevout TxOut map (script_pubkey from addr vecs) ──
let addr_readers = indexer.vecs.addrs.addr_readers();
let mut prevout_map: FxHashMap<OutPoint, (Txid, TxOut)> =
FxHashMap::with_capacity_and_hasher(prevout_keys.len(), Default::default());
let mut prevout_map: FxHashMap<OutPoint, TxOut> =
FxHashMap::with_capacity_and_hasher(total_inputs, Default::default());
for &op in &prevout_keys {
let txid = txid_reader.get(op.tx_index().to_usize());
// output_type, type_index, value pre-read from inputs vecs (sequential)
let &(output_type, type_index, value) =
prevout_input_data.get(&op).unwrap();
for (&op, &(output_type, type_index, value)) in &prevout_input_data {
let script_pubkey = addr_readers.script_pubkey(output_type, type_index);
prevout_map.insert(op, (txid, TxOut::from((script_pubkey, value))));
prevout_map.insert(op, TxOut::from((script_pubkey, value)));
}
let t_phase2 = t0.elapsed();
// ── Phase 3: Assemble Transaction objects ───────────────────────
let mut txs: Vec<Option<Transaction>> = (0..len).map(|_| None).collect();
@@ -239,8 +207,8 @@ impl Query {
let (prev_txid, prev_vout, prevout) = if is_coinbase {
(Txid::COINBASE, Vout::MAX, None)
} else {
let (prev_txid, prev_txout) =
prevout_map.get(&outpoint).data()?.clone();
let prev_txid = Txid::from(txin.previous_output.txid);
let prev_txout = prevout_map.get(&outpoint).data()?.clone();
(prev_txid, outpoint.vout(), Some(prev_txout))
};
@@ -299,34 +267,13 @@ impl Query {
fee: Sats::ZERO,
input,
output,
status: TxStatus {
confirmed: true,
block_height: Some(dtx.height),
block_hash: Some(dtx.block_hash),
block_time: Some(dtx.block_time),
},
status: dtx.status,
};
transaction.compute_fee();
txs[dtx.pos] = Some(transaction);
}
let t_phase3 = t0.elapsed();
if t_phase3.as_millis() > 50 {
eprintln!(
"[perf:txs] n={} vin={} prevouts={} | 1a={:.1?} 1b={:.1?} | 2={:.1?} | 3={:.1?} | total={:.1?}",
len,
total_inputs,
prevout_keys.len(),
t_phase1a,
t_phase1b - t_phase1a,
t_phase2 - t_phase1b,
t_phase3 - t_phase2,
t_phase3,
);
}
Ok(txs.into_iter().map(Option::unwrap).collect())
}

View File

@@ -1,7 +1,7 @@
use bitcoin::hex::{DisplayHex, FromHex};
use brk_error::{Error, OptionData, Result};
use brk_types::{
Height, MerkleProof, Transaction, TxInIndex, TxIndex, TxOutIndex,
BlockHash, Height, MerkleProof, Timestamp, Transaction, TxInIndex, TxIndex, TxOutIndex,
TxOutspend, TxStatus, Txid, TxidPrefix, Vin, Vout,
};
use vecdb::{ReadableVec, VecIndex};
@@ -9,59 +9,50 @@ use vecdb::{ReadableVec, VecIndex};
use crate::Query;
impl Query {
pub fn transaction(&self, txid: &Txid) -> Result<Transaction> {
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(txid)
{
return Ok(tx_with_hex.tx().clone());
}
// ── Txid → TxIndex resolution (single source of truth) ─────────
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
/// Resolve a txid to its internal TxIndex via prefix lookup.
#[inline]
pub(crate) fn resolve_tx_index(&self, txid: &Txid) -> Result<TxIndex> {
self.indexer()
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_by_index(tx_index)
.get(&TxidPrefix::from(txid))
.map_err(|_| Error::UnknownTxid)?
.map(|cow| cow.into_owned())
.ok_or(Error::UnknownTxid)
}
pub fn transaction_status(&self, txid: &Txid) -> Result<TxStatus> {
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& mempool.get_txs().contains_key(txid)
{
return Ok(TxStatus::UNCONFIRMED);
}
/// Resolve a txid to its internal (TxIndex, Height) pair.
pub fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
    let tx_index = self.resolve_tx_index(txid)?;
    // Pair the index with its confirmed height.
    self.confirmed_status_height(tx_index)
        .map(|height| (tx_index, height))
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// ── TxStatus construction (single source of truth) ─────────────
// Get block info for status
let height = self
.computer()
/// Height for a confirmed tx_index via in-memory TxHeights lookup.
#[inline]
pub(crate) fn confirmed_status_height(&self, tx_index: TxIndex) -> Result<Height> {
self.computer()
.indexes
.tx_heights
.get_shared(tx_index)
.data()?;
let block_hash = indexer.vecs.blocks.cached_blockhash.collect_one(height).data()?;
let block_time = indexer.vecs.blocks.cached_timestamp.collect_one(height).data()?;
.data()
}
/// Full confirmed TxStatus for a tx_index: height lookup, then block metadata.
#[inline]
pub(crate) fn confirmed_status(&self, tx_index: TxIndex) -> Result<TxStatus> {
    self.confirmed_status_at(self.confirmed_status_height(tx_index)?)
}
/// Full confirmed TxStatus from a known height.
#[inline]
pub(crate) fn confirmed_status_at(&self, height: Height) -> Result<TxStatus> {
let (block_hash, block_time) = self.block_hash_and_time(height)?;
Ok(TxStatus {
confirmed: true,
block_height: Some(height),
@@ -70,6 +61,33 @@ impl Query {
})
}
/// Hash and timestamp of the block at `height`, read from the cached block vecs.
#[inline]
pub(crate) fn block_hash_and_time(&self, height: Height) -> Result<(BlockHash, Timestamp)> {
    let indexer = self.indexer();
    // Both reads hit the cached per-block vecs, so this stays cheap.
    let blocks = &indexer.vecs.blocks;
    Ok((
        blocks.cached_blockhash.collect_one(height).data()?,
        blocks.cached_timestamp.collect_one(height).data()?,
    ))
}
// ── Transaction queries ────────────────────────────────────────
/// Look up a transaction by txid: mempool first, then the confirmed index.
pub fn transaction(&self, txid: &Txid) -> Result<Transaction> {
// Unconfirmed txs exist only in the mempool; return a clone of the cached tx.
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(txid)
{
return Ok(tx_with_hex.tx().clone());
}
// Confirmed path: resolve txid -> TxIndex, then load the tx by index.
self.transaction_by_index(self.resolve_tx_index(txid)?)
}
/// Status of a transaction: UNCONFIRMED while it sits in the mempool,
/// otherwise the full confirmed status derived from its tx_index.
pub fn transaction_status(&self, txid: &Txid) -> Result<TxStatus> {
    let in_mempool = self
        .mempool()
        .is_some_and(|mempool| mempool.get_txs().contains_key(txid));
    if in_mempool {
        return Ok(TxStatus::UNCONFIRMED);
    }
    let tx_index = self.resolve_tx_index(txid)?;
    self.confirmed_status(tx_index)
}
pub fn transaction_raw(&self, txid: &Txid) -> Result<Vec<u8>> {
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(txid)
@@ -77,48 +95,22 @@ impl Query {
return Vec::from_hex(tx_with_hex.hex())
.map_err(|_| Error::Parse("Failed to decode mempool tx hex".into()));
}
let prefix = TxidPrefix::from(txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_raw_by_index(tx_index)
self.transaction_raw_by_index(self.resolve_tx_index(txid)?)
}
pub fn transaction_hex(&self, txid: &Txid) -> Result<String> {
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
&& let Some(tx_with_hex) = mempool.get_txs().get(txid)
{
return Ok(tx_with_hex.hex().to_string());
}
// Look up confirmed transaction by txid prefix
let prefix = TxidPrefix::from(txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_hex_by_index(tx_index)
self.transaction_hex_by_index(self.resolve_tx_index(txid)?)
}
// ── Outspend queries ───────────────────────────────────────────
pub fn outspend(&self, txid: &Txid, vout: Vout) -> Result<TxOutspend> {
if self
.mempool()
.is_some_and(|m| m.get_txs().contains_key(txid))
{
if self.mempool().is_some_and(|m| m.get_txs().contains_key(txid)) {
return Ok(TxOutspend::UNSPENT);
}
let (_, first_txout, output_count) = self.resolve_tx_outputs(txid)?;
@@ -135,7 +127,33 @@ impl Query {
return Ok(vec![TxOutspend::UNSPENT; tx_with_hex.tx().output.len()]);
}
let (_, first_txout, output_count) = self.resolve_tx_outputs(txid)?;
self.resolve_outspends(first_txout, output_count)
}
/// Resolve spend status for a single output. Minimal reads.
fn resolve_outspend(&self, txout_index: TxOutIndex) -> Result<TxOutspend> {
// Single indexed read: which input (if any) spends this output.
let txin_index = self
.computer()
.outputs
.spent
.txin_index
.reader()
.get(usize::from(txout_index));
// Sentinel marks an output that has never been spent.
if txin_index == TxInIndex::UNSPENT {
return Ok(TxOutspend::UNSPENT);
}
// Spent: assemble the full record (spender txid, vin, confirmed status).
self.build_outspend(txin_index)
}
/// Resolve spend status for a contiguous range of outputs.
/// Readers/cursors created once, reused for all outputs.
fn resolve_outspends(
&self,
first_txout: TxOutIndex,
output_count: usize,
) -> Result<Vec<TxOutspend>> {
let indexer = self.indexer();
let txin_index_reader = self.computer().outputs.spent.txin_index.reader();
let txid_reader = indexer.vecs.transactions.txid.reader();
@@ -144,6 +162,7 @@ impl Query {
let mut input_tx_cursor = indexer.vecs.inputs.tx_index.cursor();
let mut first_txin_cursor = indexer.vecs.transactions.first_txin_index.cursor();
let mut cached_status: Option<(Height, BlockHash, Timestamp)> = None;
let mut outspends = Vec::with_capacity(output_count);
for i in 0..output_count {
let txin_index = txin_index_reader.get(usize::from(first_txout + Vout::from(i)));
@@ -157,20 +176,18 @@ impl Query {
let spending_first_txin = first_txin_cursor.get(spending_tx_index.to_usize()).data()?;
let vin = Vin::from(usize::from(txin_index) - usize::from(spending_first_txin));
let spending_txid = txid_reader.get(spending_tx_index.to_usize());
let spending_height = tx_heights.get_shared(spending_tx_index).data()?;
let spending_height: Height = tx_heights.get_shared(spending_tx_index).data()?;
let block_hash = indexer
.vecs
.blocks
.cached_blockhash
.collect_one(spending_height)
.data()?;
let block_time = indexer
.vecs
.blocks
.cached_timestamp
.collect_one(spending_height)
.data()?;
let (block_hash, block_time) =
if let Some((h, ref bh, bt)) = cached_status
&& h == spending_height
{
(bh.clone(), bt)
} else {
let (bh, bt) = self.block_hash_and_time(spending_height)?;
cached_status = Some((spending_height, bh.clone(), bt));
(bh, bt)
};
outspends.push(TxOutspend {
spent: true,
@@ -188,16 +205,48 @@ impl Query {
Ok(outspends)
}
/// Build a single TxOutspend from a known-spent TxInIndex.
/// Caller guarantees `txin_index != TxInIndex::UNSPENT`.
fn build_outspend(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
let indexer = self.indexer();
// Which transaction owns this input (i.e. the spender).
let spending_tx_index: TxIndex = indexer
.vecs
.inputs
.tx_index
.collect_one_at(usize::from(txin_index))
.data()?;
// First input index of the spending tx, used to turn the global
// txin_index into a tx-local vin offset below.
let spending_first_txin: TxInIndex = indexer
.vecs
.transactions
.first_txin_index
.collect_one(spending_tx_index)
.data()?;
let vin = Vin::from(usize::from(txin_index) - usize::from(spending_first_txin));
let spending_txid = indexer
.vecs
.transactions
.txid
.reader()
.get(spending_tx_index.to_usize());
// Height + cached block metadata for the spender's confirmed status.
let spending_height = self.confirmed_status_height(spending_tx_index)?;
let (block_hash, block_time) = self.block_hash_and_time(spending_height)?;
Ok(TxOutspend {
spent: true,
txid: Some(spending_txid),
vin: Some(vin),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
block_hash: Some(block_hash),
block_time: Some(block_time),
}),
})
}
/// Resolve txid to (tx_index, first_txout_index, output_count).
fn resolve_tx_outputs(&self, txid: &Txid) -> Result<(TxIndex, TxOutIndex, usize)> {
let prefix = TxidPrefix::from(txid);
let tx_index = self.resolve_tx_index(txid)?;
let indexer = self.indexer();
let tx_index: TxIndex = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)?
.map(|cow| cow.into_owned())
.ok_or(Error::UnknownTxid)?;
let first = indexer
.vecs
.transactions
@@ -211,76 +260,6 @@ impl Query {
Ok((tx_index, first, usize::from(next) - usize::from(first)))
}
/// Resolve spend status for a single output.
fn resolve_outspend(&self, txout_index: TxOutIndex) -> Result<TxOutspend> {
let indexer = self.indexer();
// Which input (if any) spends this output; sentinel means unspent.
let txin_index = self
.computer()
.outputs
.spent
.txin_index
.reader()
.get(usize::from(txout_index));
if txin_index == TxInIndex::UNSPENT {
return Ok(TxOutspend::UNSPENT);
}
// Transaction that owns the spending input.
let spending_tx_index = indexer
.vecs
.inputs
.tx_index
.collect_one_at(usize::from(txin_index))
.data()?;
// First input of the spender, to derive the tx-local vin offset.
let spending_first_txin = indexer
.vecs
.transactions
.first_txin_index
.collect_one(spending_tx_index)
.data()?;
// In-memory height lookup for the spending transaction.
let spending_height = self
.computer()
.indexes
.tx_heights
.get_shared(spending_tx_index)
.data()?;
Ok(TxOutspend {
spent: true,
txid: Some(
indexer
.vecs
.transactions
.txid
.reader()
.get(spending_tx_index.to_usize()),
),
vin: Some(Vin::from(
usize::from(txin_index) - usize::from(spending_first_txin),
)),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
// NOTE(review): reads the plain `blockhash` vec here while
// `timestamp` below goes through collect_one — other call sites
// use the `cached_blockhash`/`cached_timestamp` vecs; confirm the
// uncached path is intentional.
block_hash: Some(
indexer
.vecs
.blocks
.blockhash
.reader()
.get(spending_height.to_usize()),
),
block_time: Some(
indexer
.vecs
.blocks
.timestamp
.collect_one(spending_height)
.data()?,
),
}),
})
}
// === Helper methods ===
pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
@@ -313,24 +292,6 @@ impl Query {
.to_lower_hex_string())
}
/// Resolve a txid to its internal (TxIndex, Height) pair.
pub fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
let indexer = self.indexer();
// Txids are keyed by a fixed-size prefix in the store.
let prefix = TxidPrefix::from(txid);
// NOTE(review): store errors propagate as-is via `?` here instead of
// being mapped to Error::UnknownTxid — confirm that is intentional.
let tx_index: TxIndex = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)?
// Store hands back a Cow; take ownership of the index.
.map(|cow| cow.into_owned())
.ok_or(Error::UnknownTxid)?;
// In-memory height lookup for the confirmed tx.
let height: Height = self
.computer()
.indexes
.tx_heights
.get_shared(tx_index)
.data()?;
Ok((tx_index, height))
}
/// Broadcast a raw transaction (hex-encoded) through the node client;
/// returns the txid reported by the node.
pub fn broadcast_transaction(&self, hex: &str) -> Result<Txid> {
self.client().send_raw_transaction(hex)
}

View File

@@ -189,26 +189,8 @@ impl AppState {
F: FnOnce(&brk_query::Query) -> brk_error::Result<T> + Send + 'static,
{
self.cached(headers, strategy, uri, "application/json", move |q, enc| {
let t0 = std::time::Instant::now();
let value = f(q)?;
let t_query = t0.elapsed();
let json = serde_json::to_vec(&value).unwrap();
let t_json = t0.elapsed();
let json_len = json.len();
let compressed = enc.compress(Bytes::from(json));
let t_total = t0.elapsed();
if t_total.as_millis() > 100 {
eprintln!(
"[perf] query={:.1?} json={:.1?}({:.1}MB) compress={:.1?}({}) total={:.1?}",
t_query,
t_json - t_query,
json_len as f64 / 1_048_576.0,
t_total - t_json,
enc.as_str(),
t_total,
);
}
Ok(compressed)
Ok(enc.compress(Bytes::from(serde_json::to_vec(&value).unwrap())))
})
.await
}