server: mempool.space (ms) endpoint fixes

This commit is contained in:
nym21
2026-04-02 22:37:34 +02:00
parent d92cf43c57
commit 8dfc1bc932
62 changed files with 1639 additions and 1698 deletions

7
Cargo.lock generated
View File

@@ -558,6 +558,7 @@ dependencies = [
"brk_types",
"color-eyre",
"fjall",
"parking_lot",
"rayon",
"rlimit",
"rustc-hash",
@@ -2535,8 +2536,6 @@ dependencies = [
[[package]]
name = "rawdb"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83fd9f9db42fd2d1adfbd7cf447f021776b3b8fd15e09788988fc18c61e1f6bc"
dependencies = [
"libc",
"log",
@@ -3429,8 +3428,6 @@ checksum = "8f54a172d0620933a27a4360d3db3e2ae0dd6cceae9730751a036bbf182c4b23"
[[package]]
name = "vecdb"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5422c45d12de71456700c199f9553319cb99e76311e413316dca7e9efd5133b6"
dependencies = [
"itoa",
"libc",
@@ -3452,8 +3449,6 @@ dependencies = [
[[package]]
name = "vecdb_derive"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b075be4cec2d718d40dc422cef038c10d6fcce4aad594199cc0a301a4985146"
dependencies = [
"quote",
"syn",

View File

@@ -87,8 +87,8 @@ tower-http = { version = "0.6.8", features = ["catch-panic", "compression-br", "
tower-layer = "0.3"
tracing = { version = "0.1", default-features = false, features = ["std"] }
ureq = { version = "3.3.0", features = ["json"] }
vecdb = { version = "0.9.2", features = ["derive", "serde_json", "pco", "schemars"] }
# vecdb = { path = "../anydb/crates/vecdb", features = ["derive", "serde_json", "pco", "schemars"] }
# vecdb = { version = "0.9.2", features = ["derive", "serde_json", "pco", "schemars"] }
vecdb = { path = "../anydb/crates/vecdb", features = ["derive", "serde_json", "pco", "schemars"] }
[workspace.metadata.release]
shared-version = true

View File

@@ -6139,6 +6139,8 @@ pub struct SeriesTree_Pools_Minor {
pub parasite: BlocksDominancePattern,
pub redrockpool: BlocksDominancePattern,
pub est3lar: BlocksDominancePattern,
pub braiinssolo: BlocksDominancePattern,
pub solopool: BlocksDominancePattern,
}
impl SeriesTree_Pools_Minor {
@@ -6284,6 +6286,8 @@ impl SeriesTree_Pools_Minor {
parasite: BlocksDominancePattern::new(client.clone(), "parasite".to_string()),
redrockpool: BlocksDominancePattern::new(client.clone(), "redrockpool".to_string()),
est3lar: BlocksDominancePattern::new(client.clone(), "est3lar".to_string()),
braiinssolo: BlocksDominancePattern::new(client.clone(), "braiinssolo".to_string()),
solopool: BlocksDominancePattern::new(client.clone(), "solopool".to_string()),
}
}
}

View File

@@ -44,7 +44,7 @@ impl Vecs {
let db = open_db(parent_path, DB_NAME, 100_000)?;
let pools = pools();
let version = parent_version + Version::new(3) + Version::new(pools.len() as u32);
let version = parent_version + Version::new(4) + Version::new(pools.len() as u32);
let mut major_map = BTreeMap::new();
let mut minor_map = BTreeMap::new();
@@ -123,8 +123,7 @@ impl Vecs {
self.pool.len()
);
}
self.pool
.validate_computed_version_or_reset(dep_version)?;
self.pool.validate_computed_version_or_reset(dep_version)?;
let first_txout_index = indexer.vecs.transactions.first_txout_index.reader();
let output_type = indexer.vecs.outputs.output_type.reader();

View File

@@ -19,6 +19,7 @@ brk_store = { workspace = true }
brk_types = { workspace = true }
brk_traversable = { workspace = true }
fjall = { workspace = true }
parking_lot = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true }
tracing = { workspace = true }

View File

@@ -3,6 +3,7 @@
use std::{
fs,
path::{Path, PathBuf},
sync::Arc,
thread::{self, sleep},
time::{Duration, Instant},
};
@@ -10,7 +11,8 @@ use std::{
use brk_error::Result;
use brk_reader::{Reader, XORBytes};
use brk_rpc::Client;
use brk_types::Height;
use brk_types::{BlockHash, Height};
use parking_lot::RwLock;
use fjall::PersistMode;
use tracing::{debug, info};
use vecdb::{
@@ -36,6 +38,13 @@ pub struct Indexer<M: StorageMode = Rw> {
path: PathBuf,
pub vecs: Vecs<M>,
pub stores: Stores,
tip_blockhash: Arc<RwLock<BlockHash>>,
}
impl<M: StorageMode> Indexer<M> {
pub fn tip_blockhash(&self) -> BlockHash {
self.tip_blockhash.read().clone()
}
}
impl ReadOnlyClone for Indexer {
@@ -46,6 +55,7 @@ impl ReadOnlyClone for Indexer {
path: self.path.clone(),
vecs: self.vecs.read_only_clone(),
stores: self.stores.clone(),
tip_blockhash: self.tip_blockhash.clone(),
}
}
}
@@ -77,10 +87,17 @@ impl Indexer {
let stores = Stores::forced_import(&indexed_path, VERSION)?;
info!("Imported stores in {:?}", i.elapsed());
let tip_blockhash = vecs
.blocks
.blockhash
.collect_last()
.unwrap_or_default();
Ok(Self {
path: indexed_path.clone(),
vecs,
stores,
tip_blockhash: Arc::new(RwLock::new(tip_blockhash)),
})
};
@@ -288,6 +305,8 @@ impl Indexer {
export(stores, vecs, height)?;
readers = Readers::new(vecs);
}
*self.tip_blockhash.write() = block.block_hash().into();
}
drop(readers);
@@ -302,6 +321,9 @@ impl Indexer {
sleep(Duration::from_secs(5));
info!("Exporting...");
let i = Instant::now();
if !tasks.is_empty() {
let i = Instant::now();
for task in tasks {
@@ -317,6 +339,8 @@ impl Indexer {
}
db.compact()?;
info!("Exported in {:?}", i.elapsed());
Ok(())
});

View File

@@ -4,8 +4,8 @@ use bitcoin::{Network, PublicKey, ScriptBuf};
use brk_error::{Error, Result};
use brk_types::{
Addr, AddrBytes, AddrChainStats, AddrHash, AddrIndexOutPoint, AddrIndexTxIndex, AddrStats,
AnyAddrDataIndexEnum, OutputType, Sats, Transaction, TxIndex, TxStatus, Txid, TypeIndex, Unit,
Utxo, Vout,
AnyAddrDataIndexEnum, Height, OutputType, Transaction, TxIndex, TxStatus, Txid, TypeIndex,
Unit, Utxo, Vout,
};
use vecdb::{ReadableVec, VecIndex};
@@ -186,25 +186,19 @@ impl Query {
let first_txout_index_reader = vecs.transactions.first_txout_index.reader();
let value_reader = vecs.outputs.value.reader();
let blockhash_reader = vecs.blocks.blockhash.reader();
let mut height_cursor = vecs.transactions.height.cursor();
let mut block_ts_cursor = vecs.blocks.timestamp.cursor();
let utxos: Vec<Utxo> = outpoints
.into_iter()
.map(|(tx_index, vout)| {
let txid: Txid = txid_reader.get(tx_index.to_usize());
let height = vecs
.transactions
.height
.collect_one_at(tx_index.to_usize())
.unwrap();
let txid = txid_reader.get(tx_index.to_usize());
let height = height_cursor.get(tx_index.to_usize()).unwrap();
let first_txout_index = first_txout_index_reader.get(tx_index.to_usize());
let txout_index = first_txout_index + vout;
let value: Sats = value_reader.get(usize::from(txout_index));
let value = value_reader.get(usize::from(txout_index));
let block_hash = blockhash_reader.get(usize::from(height));
let block_time = vecs
.blocks
.timestamp
.collect_one_at(usize::from(height))
.unwrap();
let block_time = block_ts_cursor.get(height.to_usize()).unwrap();
Utxo {
txid,
@@ -247,6 +241,29 @@ impl Query {
Ok(txids)
}
/// Height of the last on-chain activity for an address (last tx_index → height).
pub fn addr_last_activity_height(&self, addr: &Addr) -> Result<Height> {
let (output_type, type_index) = self.resolve_addr(addr)?;
let store = self
.indexer()
.stores
.addr_type_to_addr_index_and_tx_index
.get(output_type)
.unwrap();
let prefix = u32::from(type_index).to_be_bytes();
let last_tx_index = store
.prefix(prefix)
.next_back()
.map(|(key, _): (AddrIndexTxIndex, Unit)| key.tx_index())
.ok_or(Error::UnknownAddr)?;
self.indexer()
.vecs
.transactions
.height
.collect_one(last_tx_index)
.ok_or(Error::UnknownAddr)
}
/// Resolve an address string to its output type and type_index
fn resolve_addr(&self, addr: &Addr) -> Result<(OutputType, TypeIndex)> {
let stores = &self.indexer().stores;

View File

@@ -1,9 +1,10 @@
use bitcoin::consensus::Decodable;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_reader::Reader;
use brk_types::{
BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1, BlockPool,
FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
BlkPosition, BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1,
BlockPool, FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
@@ -123,12 +124,16 @@ impl Query {
blocks.push(BlockInfo {
id: blockhashes[i].clone(),
height: Height::from(begin + i),
header,
version: header.version,
timestamp: timestamps[i],
tx_count,
size: *sizes[i],
weight: weights[i],
merkle_root: header.merkle_root,
previous_block_hash: header.previous_block_hash,
median_time,
nonce: header.nonce,
bits: header.bits,
difficulty: *difficulties[i],
});
}
@@ -138,7 +143,7 @@ impl Query {
pub(crate) fn blocks_v1_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfoV1>> {
if begin >= end {
return Ok(Vec::new());
return Ok(vec![]);
}
let count = end - begin;
@@ -304,12 +309,16 @@ impl Query {
let info = BlockInfo {
id: blockhashes[i].clone(),
height: Height::from(begin + i),
header,
version: header.version,
timestamp: timestamps[i],
tx_count,
size,
weight,
merkle_root: header.merkle_root,
previous_block_hash: header.previous_block_hash,
median_time,
nonce: header.nonce,
bits: header.bits,
difficulty: *difficulties[i],
};
@@ -333,6 +342,7 @@ impl Query {
id: pool.unique_id(),
name: pool.name.to_string(),
slug: pool_slug,
miner_names: None,
},
avg_fee: Sats::from(if non_coinbase > 0 {
total_fees_u64 / non_coinbase
@@ -441,8 +451,8 @@ impl Query {
}
fn parse_coinbase_tx(
reader: &brk_reader::Reader,
position: brk_types::BlkPosition,
reader: &Reader,
position: BlkPosition,
) -> (String, Option<String>, Vec<String>, String, String) {
let raw_bytes = match reader.read_raw_bytes(position, 1000) {
Ok(bytes) => bytes,
@@ -463,7 +473,14 @@ impl Query {
let coinbase_signature_ascii = tx
.input
.first()
.map(|input| input.script_sig.as_bytes().iter().map(|&b| b as char).collect::<String>())
.map(|input| {
input
.script_sig
.as_bytes()
.iter()
.map(|&b| b as char)
.collect::<String>()
})
.unwrap_or_default();
let coinbase_addresses: Vec<String> = tx

View File

@@ -67,7 +67,7 @@ impl Query {
// Convert timestamp to ISO 8601 format
let ts_secs: i64 = (*best_ts).into();
let iso_timestamp = JiffTimestamp::from_second(ts_secs)
.map(|t| t.to_string())
.map(|t| t.strftime("%Y-%m-%dT%H:%M:%S%.3fZ").to_string())
.unwrap_or_else(|_| best_ts.to_string());
Ok(BlockTimestamp {

View File

@@ -1,6 +1,12 @@
use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{BlockHash, Height, Transaction, TxIndex, Txid};
use vecdb::{AnyVec, ReadableVec};
use brk_types::{
BlockHash, Height, OutputType, Sats, Timestamp, Transaction, TxIn, TxIndex, TxOut, TxStatus,
Txid, Vout, Weight,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
use super::BLOCK_TXS_PAGE_SIZE;
use crate::Query;
@@ -13,7 +19,13 @@ impl Query {
pub fn block_txs(&self, hash: &BlockHash, start_index: TxIndex) -> Result<Vec<Transaction>> {
let height = self.height_by_hash(hash)?;
self.block_txs_by_height(height, start_index.into())
let (first, tx_count) = self.block_tx_range(height)?;
let start: usize = start_index.into();
if start >= tx_count {
return Ok(Vec::new());
}
let count = BLOCK_TXS_PAGE_SIZE.min(tx_count - start);
self.transactions_by_range(first + start, count)
}
pub fn block_txid_at_index(&self, hash: &BlockHash, index: TxIndex) -> Result<Txid> {
@@ -21,111 +33,198 @@ impl Query {
self.block_txid_at_index_by_height(height, index.into())
}
// === Helper methods ===
// === Bulk transaction read ===
pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let max_height = self.indexed_height();
if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
let first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height)
.unwrap();
let next_first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height.incremented())
.unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));
let first: usize = first_tx_index.into();
let next: usize = next_first_tx_index.into();
let txids: Vec<Txid> = indexer.vecs.transactions.txid.collect_range_at(first, next);
Ok(txids)
}
fn block_txs_by_height(&self, height: Height, start_index: usize) -> Result<Vec<Transaction>> {
let indexer = self.indexer();
let max_height = self.indexed_height();
if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
let first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height)
.unwrap();
let next_first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height.incremented())
.unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));
let first: usize = first_tx_index.into();
let next: usize = next_first_tx_index.into();
let tx_count = next - first;
if start_index >= tx_count {
/// Batch-read `count` consecutive transactions starting at raw index `start`.
/// Block info is cached per unique height — free for same-block batches.
pub fn transactions_by_range(&self, start: usize, count: usize) -> Result<Vec<Transaction>> {
if count == 0 {
return Ok(Vec::new());
}
let end_index = (start_index + BLOCK_TXS_PAGE_SIZE).min(tx_count);
let count = end_index - start_index;
let indexer = self.indexer();
let reader = self.reader();
let end = start + count;
// 7 range reads instead of count * 7 point reads
let txids: Vec<Txid> = indexer.vecs.transactions.txid.collect_range_at(start, end);
let heights: Vec<Height> = indexer.vecs.transactions.height.collect_range_at(start, end);
let versions = indexer.vecs.transactions.tx_version.collect_range_at(start, end);
let lock_times = indexer.vecs.transactions.raw_locktime.collect_range_at(start, end);
let total_sizes = indexer.vecs.transactions.total_size.collect_range_at(start, end);
let first_txin_indices = indexer
.vecs
.transactions
.first_txin_index
.collect_range_at(start, end);
let positions = indexer.vecs.transactions.position.collect_range_at(start, end);
// Readers for prevout lookups (created once)
let txid_reader = indexer.vecs.transactions.txid.reader();
let first_txout_index_reader = indexer.vecs.transactions.first_txout_index.reader();
let value_reader = indexer.vecs.outputs.value.reader();
let output_type_reader = indexer.vecs.outputs.output_type.reader();
let type_index_reader = indexer.vecs.outputs.type_index.reader();
let addr_readers = indexer.vecs.addrs.addr_readers();
// Block info cache — for same-block batches, read once
let mut cached_block: Option<(Height, BlockHash, Timestamp)> = None;
let mut txs = Vec::with_capacity(count);
for i in start_index..end_index {
let tx_index = TxIndex::from(first + i);
let tx = self.transaction_by_index(tx_index)?;
txs.push(tx);
for i in 0..count {
let height = heights[i];
// Reuse block info if same height as previous tx
let (block_hash, block_time) =
if let Some((h, ref bh, bt)) = cached_block && h == height {
(bh.clone(), bt)
} else {
let bh = indexer.vecs.blocks.blockhash.read_once(height)?;
let bt = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();
cached_block = Some((height, bh.clone(), bt));
(bh, bt)
};
// Decode raw transaction from blk file
let buffer = reader.read_raw_bytes(positions[i], *total_sizes[i] as usize)?;
let tx = bitcoin::Transaction::consensus_decode(&mut Cursor::new(buffer))
.map_err(|_| Error::Parse("Failed to decode transaction".into()))?;
// Batch-read outpoints for this tx's inputs
let outpoints = indexer.vecs.inputs.outpoint.collect_range_at(
usize::from(first_txin_indices[i]),
usize::from(first_txin_indices[i]) + tx.input.len(),
);
let input: Vec<TxIn> = tx
.input
.iter()
.enumerate()
.map(|(j, txin)| {
let outpoint = outpoints[j];
let is_coinbase = outpoint.is_coinbase();
let (prev_txid, prev_vout, prevout) = if is_coinbase {
(Txid::COINBASE, Vout::MAX, None)
} else {
let prev_tx_index = outpoint.tx_index();
let prev_vout = outpoint.vout();
let prev_txid = txid_reader.get(prev_tx_index.to_usize());
let prev_first_txout_index =
first_txout_index_reader.get(prev_tx_index.to_usize());
let prev_txout_index = prev_first_txout_index + prev_vout;
let prev_value = value_reader.get(usize::from(prev_txout_index));
let prev_output_type: OutputType =
output_type_reader.get(usize::from(prev_txout_index));
let prev_type_index =
type_index_reader.get(usize::from(prev_txout_index));
let script_pubkey =
addr_readers.script_pubkey(prev_output_type, prev_type_index);
(
prev_txid,
prev_vout,
Some(TxOut::from((script_pubkey, prev_value))),
)
};
let witness = txin
.witness
.iter()
.map(|w| w.to_lower_hex_string())
.collect();
TxIn {
txid: prev_txid,
vout: prev_vout,
prevout,
script_sig: txin.script_sig.clone(),
script_sig_asm: (),
witness,
is_coinbase,
sequence: txin.sequence.0,
inner_redeem_script_asm: (),
inner_witness_script_asm: (),
}
})
.collect();
let weight = Weight::from(tx.weight());
let total_sigop_cost = tx.total_sigop_cost(|_| None);
let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
let mut transaction = Transaction {
index: Some(TxIndex::from(start + i)),
txid: txids[i].clone(),
version: versions[i],
lock_time: lock_times[i],
total_size: *total_sizes[i] as usize,
weight,
total_sigop_cost,
fee: Sats::ZERO,
input,
output,
status: TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
},
};
transaction.compute_fee();
txs.push(transaction);
}
Ok(txs)
}
fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
let indexer = self.indexer();
// === Helper methods ===
let max_height = self.indexed_height();
if height > max_height {
pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let (first, tx_count) = self.block_tx_range(height)?;
Ok(self
.indexer()
.vecs
.transactions
.txid
.collect_range_at(first, first + tx_count))
}
fn block_txid_at_index_by_height(&self, height: Height, index: usize) -> Result<Txid> {
let (first, tx_count) = self.block_tx_range(height)?;
if index >= tx_count {
return Err(Error::OutOfRange("Transaction index out of range".into()));
}
Ok(self
.indexer()
.vecs
.transactions
.txid
.reader()
.get(first + index))
}
/// Returns (first_tx_raw_index, tx_count) for a block at `height`.
fn block_tx_range(&self, height: Height) -> Result<(usize, usize)> {
let indexer = self.indexer();
if height > self.indexed_height() {
return Err(Error::OutOfRange("Block height out of range".into()));
}
let first_tx_index = indexer
let first: usize = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height)
.unwrap();
let next_first_tx_index = indexer
.unwrap()
.into();
let next: usize = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height.incremented())
.unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()));
let first: usize = first_tx_index.into();
let next: usize = next_first_tx_index.into();
let tx_count = next - first;
if index >= tx_count {
return Err(Error::OutOfRange("Transaction index out of range".into()));
}
let tx_index = first + index;
let txid = indexer.vecs.transactions.txid.reader().get(tx_index);
Ok(txid)
.unwrap_or_else(|| TxIndex::from(indexer.vecs.transactions.txid.len()))
.into();
Ok((first, next - first))
}
}

View File

@@ -1,7 +1,9 @@
use std::cmp::Ordering;
use brk_error::{Error, Result};
use brk_types::{
CpfpEntry, CpfpInfo, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees, Txid,
TxidParam, TxidPrefix, Weight,
CpfpEntry, CpfpInfo, FeeRate, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees,
Txid, TxidParam, TxidPrefix, Weight,
};
use crate::Query;
@@ -86,10 +88,22 @@ impl Query {
let effective_fee_per_vsize = entry.effective_fee_rate();
let best_descendant = descendants
.iter()
.max_by(|a, b| {
FeeRate::from((a.fee, a.weight))
.partial_cmp(&FeeRate::from((b.fee, b.weight)))
.unwrap_or(Ordering::Equal)
})
.cloned();
Ok(CpfpInfo {
ancestors,
best_descendant,
descendants,
effective_fee_per_vsize,
fee: entry.fee,
adjusted_vsize: entry.vsize,
})
}

View File

@@ -1,49 +1,22 @@
use brk_error::Result;
use brk_types::{BlockFeesEntry, Height, Sats, TimePeriod};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{BlockFeesEntry, TimePeriod};
use super::day1_iter::Day1Iter;
use super::block_window::BlockWindow;
use crate::Query;
impl Query {
pub fn block_fees(&self, time_period: TimePeriod) -> Result<Vec<BlockFeesEntry>> {
let computer = self.computer();
let current_height = self.height();
let start = current_height
.to_usize()
.saturating_sub(time_period.block_count());
let iter = Day1Iter::new(computer, start, current_height.to_usize());
let cumulative = &computer.mining.rewards.fees.cumulative.sats.height;
let first_height = &computer.indexes.day1.first_height;
Ok(iter.collect(|di, ts, h| {
let h_start = first_height.collect_one(di)?;
let h_end = first_height
.collect_one(di + 1_usize)
.unwrap_or(Height::from(current_height.to_usize() + 1));
let block_count = h_end.to_usize() - h_start.to_usize();
if block_count == 0 {
return None;
}
let cumulative_end = cumulative.collect_one_at(h_end.to_usize() - 1)?;
let cumulative_start = if h_start.to_usize() > 0 {
cumulative
.collect_one_at(h_start.to_usize() - 1)
.unwrap_or(Sats::ZERO)
} else {
Sats::ZERO
};
let daily_sum = cumulative_end - cumulative_start;
let avg_fees = Sats::from(*daily_sum / block_count as u64);
Some(BlockFeesEntry {
avg_height: h,
timestamp: ts,
avg_fees,
let bw = BlockWindow::new(self, time_period);
let cumulative = &self.computer().mining.rewards.fees.cumulative.sats.height;
Ok(bw
.cumulative_averages(self, cumulative)
.into_iter()
.map(|w| BlockFeesEntry {
avg_height: w.avg_height,
timestamp: w.timestamp,
avg_fees: w.avg_value,
usd: w.usd,
})
}))
.collect())
}
}

View File

@@ -1,86 +1,22 @@
use brk_error::Result;
use brk_types::{BlockRewardsEntry, Height, Sats, TimePeriod};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{BlockRewardsEntry, TimePeriod};
use super::block_window::BlockWindow;
use crate::Query;
impl Query {
pub fn block_rewards(&self, time_period: TimePeriod) -> Result<Vec<BlockRewardsEntry>> {
let computer = self.computer();
let indexer = self.indexer();
let current_height = self.height().to_usize();
let start = current_height.saturating_sub(time_period.block_count());
let coinbase_vec = &computer.mining.rewards.coinbase.block.sats;
let timestamp_vec = &indexer.vecs.blocks.timestamp;
match time_period {
// Per-block, exact rewards
TimePeriod::Day | TimePeriod::ThreeDays => {
let rewards: Vec<Sats> = coinbase_vec.collect_range_at(start, current_height + 1);
let timestamps: Vec<brk_types::Timestamp> =
timestamp_vec.collect_range_at(start, current_height + 1);
Ok(rewards
.iter()
.zip(timestamps.iter())
.enumerate()
.map(|(i, (reward, ts))| BlockRewardsEntry {
avg_height: (start + i) as u32,
timestamp: **ts,
avg_rewards: **reward,
})
.collect())
}
// Daily averages, sampled to ~200 points
_ => {
let first_height_vec = &computer.indexes.day1.first_height;
let day1_vec = &computer.indexes.height.day1;
let start_di = day1_vec
.collect_one(Height::from(start))
.unwrap_or_default();
let end_di = day1_vec
.collect_one(Height::from(current_height))
.unwrap_or_default();
let total_days = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
let step = (total_days / 200).max(1);
let mut entries = Vec::with_capacity(total_days / step + 1);
let mut di = start_di.to_usize();
while di <= end_di.to_usize() {
let day = brk_types::Day1::from(di);
let next_day = brk_types::Day1::from(di + 1);
if let Some(first_h) = first_height_vec.collect_one(day) {
let next_h = first_height_vec
.collect_one(next_day)
.unwrap_or(Height::from(current_height + 1));
let block_count = next_h.to_usize() - first_h.to_usize();
if block_count > 0 {
let sum =
coinbase_vec
.fold_range(first_h, next_h, Sats::ZERO, |acc, v| acc + v);
let avg = *sum / block_count as u64;
if let Some(ts) = timestamp_vec.collect_one(first_h) {
entries.push(BlockRewardsEntry {
avg_height: first_h.to_usize() as u32,
timestamp: *ts,
avg_rewards: avg,
});
}
}
}
di += step;
}
Ok(entries)
}
}
let bw = BlockWindow::new(self, time_period);
let cumulative = &self.computer().mining.rewards.coinbase.cumulative.sats.height;
Ok(bw
.cumulative_averages(self, cumulative)
.into_iter()
.map(|w| BlockRewardsEntry {
avg_height: w.avg_height,
timestamp: w.timestamp,
avg_rewards: w.avg_value,
usd: w.usd,
})
.collect())
}
}

View File

@@ -1,22 +1,18 @@
use brk_error::Result;
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod};
use vecdb::{ReadableOptionVec, VecIndex};
use brk_types::{BlockSizeEntry, BlockSizesWeights, BlockWeightEntry, TimePeriod, Weight};
use vecdb::ReadableVec;
use super::day1_iter::Day1Iter;
use super::block_window::BlockWindow;
use crate::Query;
impl Query {
pub fn block_sizes_weights(&self, time_period: TimePeriod) -> Result<BlockSizesWeights> {
let computer = self.computer();
let current_height = self.height();
let start = current_height
.to_usize()
.saturating_sub(time_period.block_count());
let bw = BlockWindow::new(self, time_period);
let timestamps = bw.timestamps(self);
let iter = Day1Iter::new(computer, start, current_height.to_usize());
// Rolling 24h median, sampled at day1 boundaries
let sizes_vec = &computer
// Batch read per-block rolling 24h medians for the range
let all_sizes = computer
.blocks
.size
.size
@@ -24,8 +20,9 @@ impl Query {
.distribution
.median
._24h
.day1;
let weights_vec = &computer
.height
.collect_range_at(bw.start, bw.end);
let all_weights = computer
.blocks
.weight
.weight
@@ -33,35 +30,30 @@ impl Query {
.distribution
.median
._24h
.day1;
.height
.collect_range_at(bw.start, bw.end);
let entries: Vec<_> = iter.collect(|di, ts, h| {
let size: Option<u64> = sizes_vec.collect_one_flat(di).map(|s| *s);
let weight: Option<u64> = weights_vec.collect_one_flat(di).map(|w| *w);
Some((u32::from(h), (*ts), size, weight))
});
// Sample at window midpoints
let mut sizes = Vec::with_capacity(timestamps.len());
let mut weights = Vec::with_capacity(timestamps.len());
let sizes = entries
.iter()
.filter_map(|(h, ts, size, _)| {
size.map(|s| BlockSizeEntry {
avg_height: *h,
for ((avg_height, start, _end), ts) in bw.iter().zip(&timestamps) {
let mid = start - bw.start + (bw.window / 2).min(all_sizes.len().saturating_sub(1));
if let Some(&size) = all_sizes.get(mid) {
sizes.push(BlockSizeEntry {
avg_height,
timestamp: *ts,
avg_size: s,
})
})
.collect();
let weights = entries
.iter()
.filter_map(|(h, ts, _, weight)| {
weight.map(|w| BlockWeightEntry {
avg_height: *h,
avg_size: *size,
});
}
if let Some(&weight) = all_weights.get(mid) {
weights.push(BlockWeightEntry {
avg_height,
timestamp: *ts,
avg_weight: w,
})
})
.collect();
avg_weight: Weight::from(*weight),
});
}
}
Ok(BlockSizesWeights { sizes, weights })
}

View File

@@ -0,0 +1,154 @@
use brk_types::{Cents, Dollars, Height, Sats, Timestamp, TimePeriod};
use vecdb::{ReadableVec, VecIndex};
use crate::Query;
/// Number of blocks per aggregation window, matching mempool.space's granularity.
fn block_window(period: TimePeriod) -> usize {
match period {
TimePeriod::Day | TimePeriod::ThreeDays | TimePeriod::Week => 1,
TimePeriod::Month => 3,
TimePeriod::ThreeMonths => 12,
TimePeriod::SixMonths => 18,
TimePeriod::Year | TimePeriod::TwoYears => 48,
TimePeriod::ThreeYears => 72,
}
}
/// Per-window average with metadata.
///
/// One entry is produced per aggregation window by
/// `BlockWindow::cumulative_averages`.
pub struct WindowAvg {
    /// Height of the window's midpoint block, used as the representative height.
    pub avg_height: Height,
    /// Timestamp of the window's midpoint block.
    pub timestamp: Timestamp,
    /// Window total divided by its block count, in sats.
    pub avg_value: Sats,
    /// Spot price at the midpoint block, converted from cents.
    pub usd: Dollars,
}
/// Block range and window size for a time period.
pub struct BlockWindow {
    /// First block height of the range (inclusive).
    pub start: usize,
    /// One past the last block height (exclusive) — current tip height + 1.
    pub end: usize,
    /// Number of consecutive blocks aggregated into each window.
    pub window: usize,
}
impl BlockWindow {
    /// Resolve the block range covered by `time_period`, ending at the current
    /// tip (`end` is exclusive: tip height + 1).
    pub fn new(query: &Query, time_period: TimePeriod) -> Self {
        let current_height = query.height();
        let computer = query.computer();
        let lookback = &computer.blocks.lookback;

        // Use pre-computed timestamp-based lookback for accurate time boundaries.
        // 24h, 1w, 1m, 1y use in-memory CachedVec; others fall back to PcoVec.
        let cached = &lookback.cached_window_starts.0;
        let start_height = match time_period {
            TimePeriod::Day => cached._24h.collect_one(current_height),
            TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
            TimePeriod::Week => cached._1w.collect_one(current_height),
            TimePeriod::Month => cached._1m.collect_one(current_height),
            TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
            TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
            TimePeriod::Year => cached._1y.collect_one(current_height),
            TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
            TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
        }
        // Missing lookback entry (e.g. chain shorter than the period) falls
        // back to the default Height, i.e. the whole chain from genesis.
        .unwrap_or_default();

        Self {
            start: start_height.to_usize(),
            end: current_height.to_usize() + 1,
            window: block_window(time_period),
        }
    }

    /// Compute per-window averages from a cumulative sats vec.
    /// Batch-reads timestamps, prices, and the cumulative in one pass.
    pub fn cumulative_averages(
        &self,
        query: &Query,
        cumulative: &impl ReadableVec<Height, Sats>,
    ) -> Vec<WindowAvg> {
        let indexer = query.indexer();
        let computer = query.computer();

        // Batch read all needed data for the range.
        let all_ts = indexer
            .vecs
            .blocks
            .timestamp
            .collect_range_at(self.start, self.end);
        // NOTE(review): assumes the spot-price series covers every block in
        // [start, end); a lagging price vec would make the index below panic
        // — confirm against the computer's price pipeline.
        let all_prices: Vec<Cents> = computer
            .prices
            .spot
            .cents
            .height
            .collect_range_at(self.start, self.end);

        // Read one extra element before `start` (when one exists) so the first
        // window has a cumulative baseline; `offset` re-aligns indices into
        // `all_cum`. (`saturating_sub` already clamps at 0 for usize — the
        // original's trailing `.max(0)` was a no-op and has been dropped.)
        let read_start = self.start.saturating_sub(1);
        let all_cum = cumulative.collect_range_at(read_start, self.end);
        let offset = usize::from(self.start > 0);

        let mut results = Vec::with_capacity(self.count());
        let mut pos = 0;
        let total = all_ts.len();
        while pos < total {
            // `window_end > pos` always holds here (window >= 1), so each
            // iteration yields exactly one window and `block_count >= 1`;
            // the original's `if block_count > 0` guard was dead code.
            let window_end = (pos + self.window).min(total);
            let block_count = (window_end - pos) as u64;
            let mid = (pos + window_end) / 2;

            // Window sum = cumulative at the last block minus cumulative just
            // before the first block (zero when the window starts at genesis).
            let cum_end = all_cum[window_end - 1 + offset];
            let cum_start = if pos + offset > 0 {
                all_cum[pos + offset - 1]
            } else {
                Sats::ZERO
            };
            let total_sats = cum_end - cum_start;

            results.push(WindowAvg {
                avg_height: Height::from(self.start + mid),
                timestamp: all_ts[mid],
                avg_value: Sats::from(*total_sats / block_count),
                usd: Dollars::from(all_prices[mid]),
            });

            pos = window_end;
        }
        results
    }

    /// Batch-read timestamps for the midpoint of each window.
    pub fn timestamps(&self, query: &Query) -> Vec<Timestamp> {
        let all_ts = query
            .indexer()
            .vecs
            .blocks
            .timestamp
            .collect_range_at(self.start, self.end);

        let mut timestamps = Vec::with_capacity(self.count());
        let mut pos = 0;
        while pos < all_ts.len() {
            let window_end = (pos + self.window).min(all_ts.len());
            // Sample the midpoint block of each window.
            timestamps.push(all_ts[(pos + window_end) / 2]);
            pos = window_end;
        }
        timestamps
    }

    /// Number of windows in this range (ceiling division over `window`).
    fn count(&self) -> usize {
        (self.end - self.start).div_ceil(self.window)
    }

    /// Iterate windows, yielding (avg_height, window_start, window_end) for each.
    pub fn iter(&self) -> impl Iterator<Item = (Height, usize, usize)> + '_ {
        let mut pos = self.start;
        std::iter::from_fn(move || {
            if pos >= self.end {
                return None;
            }
            let window_end = (pos + self.window).min(self.end);
            let avg_height = Height::from((pos + window_end) / 2);
            let start = pos;
            pos = window_end;
            Some((avg_height, start, window_end))
        })
    }
}

View File

@@ -1,67 +0,0 @@
use brk_computer::Computer;
use brk_types::{Day1, Height, Timestamp};
use vecdb::{ReadableVec, Ro, VecIndex};
/// Helper for iterating over day1 ranges with sampling.
pub struct Day1Iter<'a> {
    /// Read-only computer handle used to resolve day1 <-> height mappings.
    computer: &'a Computer<Ro>,
    /// First day1 index of the range (inclusive).
    start_di: Day1,
    /// Last day1 index of the range (inclusive).
    end_di: Day1,
    /// Stride between sampled days, sized so at most ~200 points are emitted.
    step: usize,
}
impl<'a> Day1Iter<'a> {
    /// Build an iterator over the day1 indices spanning
    /// `start_height..=end_height`, sampled down to roughly 200 points.
    pub fn new(computer: &'a Computer<Ro>, start_height: usize, end_height: usize) -> Self {
        // Map both height bounds to their day1 indices; a missing mapping
        // falls back to the default Day1 (index 0).
        let start_di = computer
            .indexes
            .height
            .day1
            .collect_one(Height::from(start_height))
            .unwrap_or_default();
        let end_di = computer
            .indexes
            .height
            .day1
            .collect_one(Height::from(end_height))
            .unwrap_or_default();
        // Inclusive day count; step stays 1 until the range exceeds 200 days.
        let total = end_di.to_usize().saturating_sub(start_di.to_usize()) + 1;
        let step = (total / 200).max(1);
        Self {
            computer,
            start_di,
            end_di,
            step,
        }
    }

    /// Iterate and collect entries using the provided transform function.
    ///
    /// For each sampled day, looks up its timestamp and first block height and
    /// passes them to `transform`; days with a missing lookup or a `None`
    /// transform result are skipped.
    pub fn collect<T, F>(&self, mut transform: F) -> Vec<T>
    where
        F: FnMut(Day1, Timestamp, Height) -> Option<T>,
    {
        let total = self
            .end_di
            .to_usize()
            .saturating_sub(self.start_di.to_usize())
            + 1;
        let timestamps = &self.computer.indexes.timestamp.day1;
        let heights = &self.computer.indexes.day1.first_height;
        // Capacity from the sampled count; the +1 covers the inclusive bound.
        let mut entries = Vec::with_capacity(total / self.step + 1);
        let mut i = self.start_di.to_usize();
        while i <= self.end_di.to_usize() {
            let di = Day1::from(i);
            if let (Some(ts), Some(h)) = (timestamps.collect_one(di), heights.collect_one(di))
                && let Some(entry) = transform(di, ts, h)
            {
                entries.push(entry);
            }
            i += self.step;
        }
        entries
    }
}

View File

@@ -85,7 +85,7 @@ impl Query {
let time_offset = expected_time as i64 - elapsed_time as i64;
// Calculate previous retarget using stored difficulty values
let previous_retarget = if current_epoch_usize > 0 {
let (previous_retarget, previous_time) = if current_epoch_usize > 0 {
let prev_epoch = Epoch::from(current_epoch_usize - 1);
let prev_epoch_start = computer
.indexes
@@ -107,26 +107,33 @@ impl Query {
.collect_one(epoch_start_height)
.unwrap();
if *prev_difficulty > 0.0 {
let retarget = if *prev_difficulty > 0.0 {
((*curr_difficulty / *prev_difficulty) - 1.0) * 100.0
} else {
0.0
}
};
(retarget, epoch_start_timestamp)
} else {
0.0
(0.0, epoch_start_timestamp)
};
// Expected blocks based on wall clock time since epoch start
let expected_blocks = elapsed_time as f64 / TARGET_BLOCK_TIME as f64;
Ok(DifficultyAdjustment {
progress_percent,
difficulty_change,
estimated_retarget_date,
estimated_retarget_date: estimated_retarget_date * 1000,
remaining_blocks,
remaining_time,
remaining_time: remaining_time * 1000,
previous_retarget,
previous_time,
next_retarget_height: Height::from(next_retarget_height),
time_avg,
adjusted_time_avg: time_avg,
time_avg: time_avg * 1000,
adjusted_time_avg: time_avg * 1000,
time_offset,
expected_blocks,
})
}
}

View File

@@ -42,7 +42,7 @@ pub fn iter_difficulty_epochs(
let epoch_difficulty = *epoch_to_difficulty.collect_one(epoch).unwrap_or_default();
let change_percent = match prev_difficulty {
Some(prev) if prev > 0.0 => ((epoch_difficulty / prev) - 1.0) * 100.0,
Some(prev) if prev > 0.0 => epoch_difficulty / prev,
_ => 0.0,
};

View File

@@ -79,9 +79,10 @@ impl Query {
let difficulty: Vec<DifficultyEntry> = iter_difficulty_epochs(computer, start, end)
.into_iter()
.map(|e| DifficultyEntry {
timestamp: e.timestamp,
difficulty: e.difficulty,
time: e.timestamp,
height: e.height,
difficulty: e.difficulty,
adjustment: e.change_percent,
})
.collect();

View File

@@ -2,7 +2,7 @@ mod block_fee_rates;
mod block_fees;
mod block_rewards;
mod block_sizes;
mod day1_iter;
mod block_window;
mod difficulty;
mod difficulty_adjustments;
mod epochs;

View File

@@ -1,17 +1,30 @@
use brk_error::{Error, Result};
use brk_types::{
BlockInfoV1, Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo,
PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, TimePeriod, pools,
BlockInfoV1, Day1, Height, Pool, PoolBlockCounts, PoolBlockShares, PoolDetail,
PoolDetailInfo, PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, StoredF64,
StoredU64, TimePeriod, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::Query;
/// 7-day lookback for share computation (matching mempool.space)
const LOOKBACK_DAYS: usize = 7;
/// Weekly sample interval (matching mempool.space's 604800s interval)
/// Expressed in days: 7 days * 86400 s/day = 604800 s.
const SAMPLE_WEEKLY: usize = 7;
/// Pre-read shared data for hashrate computation.
struct HashrateSharedData {
    // First day index of the range that was read.
    start_day: usize,
    // End day index of the range; collect_range_at is called with
    // (start_day, end_day), so this appears to be exclusive — confirm
    // against the vecdb range convention.
    end_day: usize,
    // Network hashrate per day over the range; None where no value is stored.
    daily_hashrate: Vec<Option<StoredF64>>,
    // First block height of each day over the same range.
    first_heights: Vec<Height>,
}
impl Query {
pub fn mining_pools(&self, time_period: TimePeriod) -> Result<PoolsSummary> {
let computer = self.computer();
let current_height = self.height();
let end = current_height.to_usize();
// No blocks indexed yet
if computer.pools.pool.len() == 0 {
@@ -19,14 +32,29 @@ impl Query {
pools: vec![],
block_count: 0,
last_estimated_hashrate: 0,
last_estimated_hashrate3d: 0,
last_estimated_hashrate1w: 0,
});
}
// Calculate start height based on time period
let start = end.saturating_sub(time_period.block_count());
// Use timestamp-based lookback for accurate time boundaries
let lookback = &computer.blocks.lookback;
let start = match time_period {
TimePeriod::Day => lookback.cached_window_starts.0._24h.collect_one(current_height),
TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
TimePeriod::Week => lookback.cached_window_starts.0._1w.collect_one(current_height),
TimePeriod::Month => lookback.cached_window_starts.0._1m.collect_one(current_height),
TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
TimePeriod::Year => lookback.cached_window_starts.0._1y.collect_one(current_height),
TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
}
.unwrap_or_default()
.to_usize();
let pools = pools();
let mut pool_data: Vec<(&'static brk_types::Pool, u64)> = Vec::new();
let mut pool_data: Vec<(&'static Pool, u64)> = Vec::new();
// For each pool, get cumulative count at end and start, subtract to get range count
for (pool_id, cumulative) in computer
@@ -78,13 +106,33 @@ impl Query {
})
.collect();
// TODO: Calculate actual hashrate from difficulty
let last_estimated_hashrate = 0u128;
let hashrate_at = |height: Height| -> u128 {
let day = computer.indexes.height.day1.collect_one(height).unwrap_or_default();
computer
.mining
.hashrate
.rate
.base
.day1
.collect_one(day)
.flatten()
.map(|v| *v as u128)
.unwrap_or(0)
};
let lookback = &computer.blocks.lookback;
let last_estimated_hashrate = hashrate_at(current_height);
let last_estimated_hashrate3d =
hashrate_at(lookback._3d.collect_one(current_height).unwrap_or_default());
let last_estimated_hashrate1w =
hashrate_at(lookback._1w.collect_one(current_height).unwrap_or_default());
Ok(PoolsSummary {
pools: pool_stats,
block_count: total_blocks,
last_estimated_hashrate,
last_estimated_hashrate3d,
last_estimated_hashrate1w,
})
}
@@ -118,8 +166,15 @@ impl Query {
// Get total blocks (all time)
let total_all: u64 = *cumulative.collect_one(current_height).unwrap_or_default();
// Get blocks for 24h (144 blocks)
let start_24h = end.saturating_sub(144);
// Use timestamp-based lookback for accurate time boundaries
let lookback = &computer.blocks.lookback;
let start_24h = lookback
.cached_window_starts
.0
._24h
.collect_one(current_height)
.unwrap_or_default()
.to_usize();
let count_before_24h: u64 = if start_24h == 0 {
0
} else {
@@ -129,8 +184,13 @@ impl Query {
};
let total_24h = total_all.saturating_sub(count_before_24h);
// Get blocks for 1w (1008 blocks)
let start_1w = end.saturating_sub(1008);
let start_1w = lookback
.cached_window_starts
.0
._1w
.collect_one(current_height)
.unwrap_or_default()
.to_usize();
let count_before_1w: u64 = if start_1w == 0 {
0
} else {
@@ -191,11 +251,12 @@ impl Query {
let reader = computer.pools.pool.reader();
let end = start.min(reader.len().saturating_sub(1));
let mut heights = Vec::with_capacity(10);
const POOL_BLOCKS_LIMIT: usize = 100;
let mut heights = Vec::with_capacity(POOL_BLOCKS_LIMIT);
for h in (0..=end).rev() {
if reader.get(h) == slug {
heights.push(h);
if heights.len() >= 10 {
if heights.len() >= POOL_BLOCKS_LIMIT {
break;
}
}
@@ -211,98 +272,166 @@ impl Query {
}
pub fn pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
let pools_list = pools();
let pool = pools_list.get(slug);
let entries = self.compute_pool_hashrate_entries(slug, 0)?;
Ok(entries
.into_iter()
.map(|(ts, hr, share)| PoolHashrateEntry {
timestamp: ts,
avg_hashrate: hr,
share,
pool_name: pool.name.to_string(),
})
.collect())
let pool_name = pools().get(slug).name.to_string();
let shared = self.hashrate_shared_data(0)?;
let pool_cum = self.pool_daily_cumulative(slug, shared.start_day, shared.end_day)?;
Ok(Self::compute_hashrate_entries(
&shared, &pool_cum, &pool_name, SAMPLE_WEEKLY,
))
}
pub fn pools_hashrate(
&self,
time_period: Option<TimePeriod>,
) -> Result<Vec<PoolHashrateEntry>> {
let current_height = self.height().to_usize();
let start = match time_period {
Some(tp) => current_height.saturating_sub(tp.block_count()),
let start_height = match time_period {
Some(tp) => {
let lookback = &self.computer().blocks.lookback;
let current_height = self.height();
match tp {
TimePeriod::Day => lookback.cached_window_starts.0._24h.collect_one(current_height),
TimePeriod::ThreeDays => lookback._3d.collect_one(current_height),
TimePeriod::Week => lookback.cached_window_starts.0._1w.collect_one(current_height),
TimePeriod::Month => lookback.cached_window_starts.0._1m.collect_one(current_height),
TimePeriod::ThreeMonths => lookback._3m.collect_one(current_height),
TimePeriod::SixMonths => lookback._6m.collect_one(current_height),
TimePeriod::Year => lookback.cached_window_starts.0._1y.collect_one(current_height),
TimePeriod::TwoYears => lookback._2y.collect_one(current_height),
TimePeriod::ThreeYears => lookback._3y.collect_one(current_height),
}
.unwrap_or_default()
.to_usize()
}
None => 0,
};
let shared = self.hashrate_shared_data(start_height)?;
let pools_list = pools();
let mut entries = Vec::new();
for pool in pools_list.iter() {
if let Ok(pool_entries) = self.compute_pool_hashrate_entries(pool.slug, start) {
for (ts, hr, share) in pool_entries {
if share > 0.0 {
entries.push(PoolHashrateEntry {
timestamp: ts,
avg_hashrate: hr,
share,
pool_name: pool.name.to_string(),
});
}
}
}
let Ok(pool_cum) =
self.pool_daily_cumulative(pool.slug, shared.start_day, shared.end_day)
else {
continue;
};
entries.extend(Self::compute_hashrate_entries(
&shared,
&pool_cum,
&pool.name,
SAMPLE_WEEKLY,
));
}
Ok(entries)
}
/// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
fn compute_pool_hashrate_entries(
/// Shared data needed for hashrate computation (read once, reuse across pools).
fn hashrate_shared_data(&self, start_height: usize) -> Result<HashrateSharedData> {
    let computer = self.computer();
    let height_to_day = &computer.indexes.height.day1;
    // Day index covering the requested start height.
    let start_day = height_to_day
        .collect_one_at(start_height)
        .unwrap_or_default()
        .to_usize();
    // One past the day of the current tip, so the tip's day is included.
    let end_day = height_to_day
        .collect_one(self.height())
        .unwrap_or_default()
        .to_usize()
        + 1;
    Ok(HashrateSharedData {
        start_day,
        end_day,
        daily_hashrate: computer
            .mining
            .hashrate
            .rate
            .base
            .day1
            .collect_range_at(start_day, end_day),
        first_heights: computer
            .indexes
            .day1
            .first_height
            .collect_range_at(start_day, end_day),
    })
}
/// Read daily cumulative blocks mined for a pool.
fn pool_daily_cumulative(
&self,
slug: PoolSlug,
start_height: usize,
) -> Result<Vec<(brk_types::Timestamp, u128, f64)>> {
start_day: usize,
end_day: usize,
) -> Result<Vec<Option<StoredU64>>> {
let computer = self.computer();
let indexer = self.indexer();
let end = self.height().to_usize() + 1;
let start = start_height;
let dominance_bps = computer
computer
.pools
.major
.get(&slug)
.map(|v| &v.base.dominance.bps.height)
.map(|v| v.base.blocks_mined.cumulative.day1.collect_range_at(start_day, end_day))
.or_else(|| {
computer
.pools
.minor
.get(&slug)
.map(|v| &v.dominance.bps.height)
.map(|v| v.blocks_mined.cumulative.day1.collect_range_at(start_day, end_day))
})
.ok_or_else(|| Error::NotFound("Pool not found".into()))?;
.ok_or_else(|| Error::NotFound("Pool not found".into()))
}
let total = end - start;
let step = (total / 200).max(1);
/// Compute hashrate entries from daily cumulative blocks + shared data.
/// Uses 7-day windowed share: pool_blocks_in_week / total_blocks_in_week.
fn compute_hashrate_entries(
shared: &HashrateSharedData,
pool_cum: &[Option<StoredU64>],
pool_name: &str,
sample_days: usize,
) -> Vec<PoolHashrateEntry> {
let total = pool_cum.len();
if total <= LOOKBACK_DAYS {
return vec![];
}
// Batch read everything for the range
let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(start, end);
let bps_values = dominance_bps.collect_range_at(start, end);
let day1_values = computer.indexes.height.day1.collect_range_at(start, end);
let hashrate_vec = &computer.mining.hashrate.rate.base.day1;
let mut entries = Vec::new();
let mut i = LOOKBACK_DAYS;
while i < total {
if let (Some(cum_now), Some(cum_prev)) =
(pool_cum[i], pool_cum[i - LOOKBACK_DAYS])
{
let pool_blocks = (*cum_now).saturating_sub(*cum_prev);
if pool_blocks > 0 {
let h_now = shared.first_heights[i].to_usize();
let h_prev = shared.first_heights[i - LOOKBACK_DAYS].to_usize();
let total_blocks = h_now.saturating_sub(h_prev);
// Pre-read all needed hashrates by collecting unique day1 values
let max_day = day1_values.iter().map(|d| d.to_usize()).max().unwrap_or(0);
let min_day = day1_values.iter().map(|d| d.to_usize()).min().unwrap_or(0);
let hashrates = hashrate_vec.collect_range_dyn(min_day, max_day + 1);
if total_blocks > 0 {
if let Some(hr) = shared.daily_hashrate[i].as_ref() {
let network_hr = f64::from(**hr);
let share = pool_blocks as f64 / total_blocks as f64;
let day = Day1::from(shared.start_day + i);
entries.push(PoolHashrateEntry {
timestamp: day.to_timestamp(),
avg_hashrate: (network_hr * share) as u128,
share,
pool_name: pool_name.to_string(),
});
}
}
}
}
i += sample_days;
}
Ok((0..total)
.step_by(step)
.filter_map(|i| {
let bps = *bps_values[i];
let share = bps as f64 / 10000.0;
let day_idx = day1_values[i].to_usize() - min_day;
let network_hr = f64::from(*hashrates.get(day_idx)?.as_ref()?);
Some((timestamps[i], (network_hr * share) as u128, share))
})
.collect())
entries
}
}

View File

@@ -1,6 +1,6 @@
use brk_error::Result;
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Timestamp};
use vecdb::{ReadableVec, VecIndex};
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Hour4, Timestamp};
use vecdb::ReadableVec;
use crate::Query;
@@ -21,38 +21,41 @@ impl Query {
}
pub fn historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
let indexer = self.indexer();
let computer = self.computer();
let max_height = self.height().to_usize();
let end = max_height + 1;
let timestamps = indexer.vecs.blocks.timestamp.collect();
let all_prices = computer.prices.spot.cents.height.collect();
let prices = if let Some(target_ts) = timestamp {
let target = usize::from(target_ts);
let h = timestamps
.binary_search_by_key(&target, |t| usize::from(*t))
.unwrap_or_else(|i| i.min(max_height));
vec![HistoricalPriceEntry {
time: usize::from(timestamps[h]) as u64,
usd: Dollars::from(all_prices[h]),
}]
} else {
let step = (max_height / 200).max(1);
(0..end)
.step_by(step)
.map(|h| HistoricalPriceEntry {
time: usize::from(timestamps[h]) as u64,
usd: Dollars::from(all_prices[h]),
})
.collect()
let prices = match timestamp {
Some(ts) => self.price_at(ts)?,
None => self.all_prices()?,
};
Ok(HistoricalPrice {
prices,
exchange_rates: ExchangeRates {},
})
}
/// Single price entry for the 4-hour bucket containing `target`.
///
/// The entry's `time` is the bucket's own timestamp, not `target` itself;
/// a missing price falls back to the default (zero) cents value.
fn price_at(&self, target: Timestamp) -> Result<Vec<HistoricalPriceEntry>> {
    let bucket = Hour4::from_timestamp(target);
    let cents = self
        .computer()
        .prices
        .spot
        .cents
        .hour4
        .collect_one(bucket)
        .flatten()
        .unwrap_or_default();
    let entry = HistoricalPriceEntry {
        time: usize::from(bucket.to_timestamp()) as u64,
        usd: Dollars::from(cents),
    };
    Ok(vec![entry])
}
/// One price entry per 4-hour bucket across the whole indexed history.
///
/// Buckets without a stored price are skipped; each entry's `time` is the
/// bucket's timestamp.
fn all_prices(&self) -> Result<Vec<HistoricalPriceEntry>> {
    let buckets = self.computer().prices.spot.cents.hour4.collect();
    let mut entries = Vec::with_capacity(buckets.len());
    for (i, maybe_cents) in buckets.into_iter().enumerate() {
        if let Some(cents) = maybe_cents {
            entries.push(HistoricalPriceEntry {
                time: usize::from(Hour4::from(i).to_timestamp()) as u64,
                usd: Dollars::from(cents),
            });
        }
    }
    Ok(entries)
}
}

View File

@@ -1,10 +1,8 @@
use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_types::{
Height, MerkleProof, OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut,
TxOutspend, TxStatus, Txid, TxidParam, TxidPrefix, Vin, Vout, Weight,
BlockHash, Height, MerkleProof, Timestamp, TxInIndex, TxIndex, TxOutspend, TxStatus,
Transaction, Txid, TxidParam, TxidPrefix, Vin, Vout,
};
use vecdb::{ReadableVec, VecIndex};
@@ -109,43 +107,11 @@ impl Query {
self.transaction_hex_by_index(tx_index)
}
pub fn outspend(&self, TxidParam { txid }: TxidParam, vout: Vout) -> Result<TxOutspend> {
// Mempool outputs are unspent in on-chain terms
if let Some(mempool) = self.mempool()
&& mempool.get_txs().contains_key(&txid)
{
return Ok(TxOutspend::UNSPENT);
}
// Look up confirmed transaction
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
// Calculate txout_index
let first_txout_index = indexer
.vecs
.transactions
.first_txout_index
.read_once(tx_index)?;
let txout_index = first_txout_index + vout;
// Look up spend status
let computer = self.computer();
let txin_index = computer.outputs.spent.txin_index.read_once(txout_index)?;
if txin_index == TxInIndex::UNSPENT {
return Ok(TxOutspend::UNSPENT);
}
self.outspend_details(txin_index)
pub fn outspend(&self, txid: TxidParam, vout: Vout) -> Result<TxOutspend> {
let all = self.outspends(txid)?;
all.into_iter()
.nth(usize::from(vout))
.ok_or(Error::OutOfRange("Output index out of range".into()))
}
pub fn outspends(&self, TxidParam { txid }: TxidParam) -> Result<Vec<TxOutspend>> {
@@ -185,6 +151,16 @@ impl Query {
// Get spend status for each output
let computer = self.computer();
let txin_index_reader = computer.outputs.spent.txin_index.reader();
let txid_reader = indexer.vecs.transactions.txid.reader();
// Cursors for PcoVec reads — buffer chunks so nearby indices share decompression
let mut input_tx_cursor = indexer.vecs.inputs.tx_index.cursor();
let mut first_txin_cursor = indexer.vecs.transactions.first_txin_index.cursor();
let mut height_cursor = indexer.vecs.transactions.height.cursor();
let mut block_ts_cursor = indexer.vecs.blocks.timestamp.cursor();
// Block info cache — spending txs in the same block share block hash/time
let mut cached_block: Option<(Height, BlockHash, Timestamp)> = None;
let mut outspends = Vec::with_capacity(output_count);
for i in 0..output_count {
@@ -193,9 +169,38 @@ impl Query {
if txin_index == TxInIndex::UNSPENT {
outspends.push(TxOutspend::UNSPENT);
} else {
outspends.push(self.outspend_details(txin_index)?);
continue;
}
let spending_tx_index = input_tx_cursor.get(usize::from(txin_index)).unwrap();
let spending_first_txin_index =
first_txin_cursor.get(spending_tx_index.to_usize()).unwrap();
let vin =
Vin::from(usize::from(txin_index) - usize::from(spending_first_txin_index));
let spending_txid = txid_reader.get(spending_tx_index.to_usize());
let spending_height = height_cursor.get(spending_tx_index.to_usize()).unwrap();
let (block_hash, block_time) =
if let Some((h, ref bh, bt)) = cached_block && h == spending_height {
(bh.clone(), bt)
} else {
let bh = indexer.vecs.blocks.blockhash.read_once(spending_height)?;
let bt = block_ts_cursor.get(spending_height.to_usize()).unwrap();
cached_block = Some((spending_height, bh.clone(), bt));
(bh, bt)
};
outspends.push(TxOutspend {
spent: true,
txid: Some(spending_txid),
vin: Some(vin),
status: Some(TxStatus {
confirmed: true,
block_height: Some(spending_height),
block_hash: Some(block_hash),
block_time: Some(block_time),
}),
});
}
Ok(outspends)
@@ -204,155 +209,10 @@ impl Query {
// === Helper methods ===
pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
let indexer = self.indexer();
let reader = self.reader();
// Get tx metadata using collect_one for PcoVec, read_once for BytesVec
let txid = indexer.vecs.transactions.txid.read_once(tx_index)?;
let height = indexer
.vecs
.transactions
.height
.collect_one(tx_index)
.unwrap();
let version = indexer
.vecs
.transactions
.tx_version
.collect_one(tx_index)
.unwrap();
let lock_time = indexer
.vecs
.transactions
.raw_locktime
.collect_one(tx_index)
.unwrap();
let total_size = indexer
.vecs
.transactions
.total_size
.collect_one(tx_index)
.unwrap();
let first_txin_index = indexer
.vecs
.transactions
.first_txin_index
.collect_one(tx_index)
.unwrap();
let position = indexer
.vecs
.transactions
.position
.collect_one(tx_index)
.unwrap();
// Get block info for status
let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?;
let block_time = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();
// Read and decode the raw transaction from blk file
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
let mut cursor = Cursor::new(buffer);
let tx = bitcoin::Transaction::consensus_decode(&mut cursor)
.map_err(|_| Error::Parse("Failed to decode transaction".into()))?;
// Create readers for random access lookups
let txid_reader = indexer.vecs.transactions.txid.reader();
let first_txout_index_reader = indexer.vecs.transactions.first_txout_index.reader();
let value_reader = indexer.vecs.outputs.value.reader();
let output_type_reader = indexer.vecs.outputs.output_type.reader();
let type_index_reader = indexer.vecs.outputs.type_index.reader();
let addr_readers = indexer.vecs.addrs.addr_readers();
// Batch-read outpoints for all inputs (avoids per-input PcoVec page decompression)
let outpoints: Vec<_> = indexer.vecs.inputs.outpoint.collect_range_at(
usize::from(first_txin_index),
usize::from(first_txin_index) + tx.input.len(),
);
// Build inputs with prevout information
let input: Vec<TxIn> = tx
.input
.iter()
.enumerate()
.map(|(i, txin)| {
let outpoint = outpoints[i];
let is_coinbase = outpoint.is_coinbase();
// Get prevout info if not coinbase
let (prev_txid, prev_vout, prevout) = if is_coinbase {
(Txid::COINBASE, Vout::MAX, None)
} else {
let prev_tx_index = outpoint.tx_index();
let prev_vout = outpoint.vout();
let prev_txid = txid_reader.get(prev_tx_index.to_usize());
// Calculate the txout_index for the prevout
let prev_first_txout_index =
first_txout_index_reader.get(prev_tx_index.to_usize());
let prev_txout_index = prev_first_txout_index + prev_vout;
let prev_value = value_reader.get(usize::from(prev_txout_index));
let prev_output_type: OutputType =
output_type_reader.get(usize::from(prev_txout_index));
let prev_type_index = type_index_reader.get(usize::from(prev_txout_index));
let script_pubkey =
addr_readers.script_pubkey(prev_output_type, prev_type_index);
let prevout = Some(TxOut::from((script_pubkey, prev_value)));
(prev_txid, prev_vout, prevout)
};
TxIn {
txid: prev_txid,
vout: prev_vout,
prevout,
script_sig: txin.script_sig.clone(),
script_sig_asm: (),
is_coinbase,
sequence: txin.sequence.0,
inner_redeem_script_asm: (),
}
})
.collect();
// Calculate weight before consuming tx.output
let weight = Weight::from(tx.weight());
// Calculate sigop cost
let total_sigop_cost = tx.total_sigop_cost(|_| None);
// Build outputs
let output: Vec<TxOut> = tx.output.into_iter().map(TxOut::from).collect();
// Build status
let status = TxStatus {
confirmed: true,
block_height: Some(height),
block_hash: Some(block_hash),
block_time: Some(block_time),
};
let mut transaction = Transaction {
index: Some(tx_index),
txid,
version,
lock_time,
total_size: *total_size as usize,
weight,
total_sigop_cost,
fee: Sats::ZERO, // Will be computed below
input,
output,
status,
};
// Compute fee from inputs - outputs
transaction.compute_fee();
Ok(transaction)
self.transactions_by_range(tx_index.to_usize(), 1)?
.into_iter()
.next()
.ok_or(Error::NotFound("Transaction not found".into()))
}
fn transaction_raw_by_index(&self, tx_index: TxIndex) -> Result<Vec<u8>> {
@@ -366,60 +226,7 @@ impl Query {
Ok(self.transaction_raw_by_index(tx_index)?.to_lower_hex_string())
}
/// Build the full spend record for the input at `txin_index`: which
/// transaction spends the output, at which input position (vin), and in
/// which confirmed block.
fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
    let indexer = self.indexer();
    // Look up spending tx_index directly
    let spending_tx_index = indexer
        .vecs
        .inputs
        .tx_index
        .collect_one(txin_index)
        .unwrap();
    // Calculate vin
    // vin = offset of this input within the spending tx's input run,
    // i.e. txin_index relative to the tx's first input index.
    let spending_first_txin_index = indexer
        .vecs
        .transactions
        .first_txin_index
        .collect_one(spending_tx_index)
        .unwrap();
    let vin = Vin::from(usize::from(txin_index) - usize::from(spending_first_txin_index));
    // Get spending tx details
    let spending_txid = indexer
        .vecs
        .transactions
        .txid
        .read_once(spending_tx_index)?;
    let spending_height = indexer
        .vecs
        .transactions
        .height
        .collect_one(spending_tx_index)
        .unwrap();
    // Hash and timestamp of the block containing the spending tx,
    // reported as a confirmed status below.
    let block_hash = indexer.vecs.blocks.blockhash.read_once(spending_height)?;
    let block_time = indexer
        .vecs
        .blocks
        .timestamp
        .collect_one(spending_height)
        .unwrap();
    Ok(TxOutspend {
        spent: true,
        txid: Some(spending_txid),
        vin: Some(vin),
        status: Some(TxStatus {
            confirmed: true,
            block_height: Some(spending_height),
            block_hash: Some(block_hash),
            block_time: Some(block_time),
        }),
    })
}
fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
pub fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
let indexer = self.indexer();
let prefix = TxidPrefix::from(txid);
let tx_index: TxIndex = indexer

View File

@@ -8,7 +8,7 @@ use brk_indexer::Indexer;
use brk_mempool::Mempool;
use brk_reader::Reader;
use brk_rpc::Client;
use brk_types::{Height, SyncStatus};
use brk_types::{BlockHash, BlockHashPrefix, Height, SyncStatus};
use vecdb::{AnyVec, ReadOnlyClone, ReadableVec, Ro};
#[cfg(feature = "tokio")]
@@ -72,6 +72,16 @@ impl Query {
self.indexed_height().min(self.computed_height())
}
/// Tip block hash, cached in the indexer.
pub fn tip_blockhash(&self) -> BlockHash {
    let indexer = self.indexer();
    indexer.tip_blockhash()
}
/// Tip block hash prefix for cache etags.
pub fn tip_hash_prefix(&self) -> BlockHashPrefix {
    let tip = self.tip_blockhash();
    BlockHashPrefix::from(&tip)
}
/// Build sync status with the given tip height
pub fn sync_status(&self, tip_height: Height) -> SyncStatus {
let indexed_height = self.indexed_height();

View File

@@ -137,6 +137,12 @@ impl Client {
None
};
let witness = txin
.witness
.iter()
.map(|w| bitcoin::hex::DisplayHex::to_lower_hex_string(w))
.collect();
Ok(TxIn {
is_coinbase,
prevout: txout,
@@ -144,8 +150,10 @@ impl Client {
vout: txin.previous_output.vout.into(),
script_sig: txin.script_sig,
script_sig_asm: (),
witness,
sequence: txin.sequence.into(),
inner_redeem_script_asm: (),
inner_witness_script_asm: (),
})
})
.collect::<Result<Vec<_>>>()?;

View File

@@ -7,7 +7,7 @@ use axum::{
};
use brk_types::{
AddrParam, AddrStats, AddrTxidsParam, AddrValidation, Transaction, Txid, Utxo,
ValidateAddrParam,
ValidateAddrParam, Version,
};
use crate::{AppState, CacheStrategy, extended::TransformResponseExtended};
@@ -29,7 +29,8 @@ impl AddrRoutes for ApiRouter<AppState> {
Path(path): Path<AddrParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.addr(path.addr)).await
let strategy = state.addr_cache(Version::ONE, &path.addr);
state.cached_json(&headers, strategy, &uri, move |q| q.addr(path.addr)).await
}, |op| op
.id("get_address")
.addrs_tag()
@@ -51,7 +52,8 @@ impl AddrRoutes for ApiRouter<AppState> {
Query(params): Query<AddrTxidsParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.addr_txs(path.addr, params.after_txid, 25)).await
let strategy = state.addr_cache(Version::ONE, &path.addr);
state.cached_json(&headers, strategy, &uri, move |q| q.addr_txs(path.addr, params.after_txid, 50)).await
}, |op| op
.id("get_address_txs")
.addrs_tag()
@@ -73,7 +75,8 @@ impl AddrRoutes for ApiRouter<AppState> {
Query(params): Query<AddrTxidsParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.addr_txs(path.addr, params.after_txid, 25)).await
let strategy = state.addr_cache(Version::ONE, &path.addr);
state.cached_json(&headers, strategy, &uri, move |q| q.addr_txs(path.addr, params.after_txid, 25)).await
}, |op| op
.id("get_address_confirmed_txs")
.addrs_tag()
@@ -115,7 +118,8 @@ impl AddrRoutes for ApiRouter<AppState> {
Path(path): Path<AddrParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.addr_utxos(path.addr)).await
let strategy = state.addr_cache(Version::ONE, &path.addr);
state.cached_json(&headers, strategy, &uri, move |q| q.addr_utxos(path.addr)).await
}, |op| op
.id("get_address_utxos")
.addrs_tag()

View File

@@ -6,7 +6,7 @@ use axum::{
use brk_query::BLOCK_TXS_PAGE_SIZE;
use brk_types::{
BlockHashParam, BlockHashStartIndex, BlockHashTxIndex, BlockInfo, BlockInfoV1, BlockStatus,
BlockTimestamp, HeightParam, TimestampParam, Transaction, TxIndex, Txid,
BlockTimestamp, HeightParam, TimestampParam, Transaction, TxIndex, Txid, Version,
};
use crate::{AppState, CacheStrategy, extended::TransformResponseExtended};
@@ -24,7 +24,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| q.block(&path.hash)).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_json(&headers, strategy, &uri, move |q| q.block(&path.hash)).await
},
|op| {
op.id("get_block")
@@ -45,7 +46,8 @@ impl BlockRoutes for ApiRouter<AppState> {
"/api/v1/block/{hash}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_json(&headers, strategy, &uri, move |q| {
let height = q.height_by_hash(&path.hash)?;
q.block_by_height_v1(height)
}).await
@@ -66,7 +68,8 @@ impl BlockRoutes for ApiRouter<AppState> {
"/api/block/{hash}/header",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_header_hex(&path.hash)).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_text(&headers, strategy, &uri, move |q| q.block_header_hex(&path.hash)).await
},
|op| {
op.id("get_block_header")
@@ -87,7 +90,7 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_hash_by_height(path.height).map(|h| h.to_string())).await
state.cached_text(&headers, state.height_cache(Version::ONE, path.height), &uri, move |q| q.block_hash_by_height(path.height).map(|h| h.to_string())).await
},
|op| {
op.id("get_block_by_height")
@@ -111,7 +114,7 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<TimestampParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_by_timestamp(path.timestamp)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.block_by_timestamp(path.timestamp)).await
},
|op| {
op.id("get_block_by_timestamp")
@@ -133,7 +136,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashParam>,
State(state): State<AppState>| {
state.cached_bytes(&headers, CacheStrategy::Static, &uri, move |q| q.block_raw(&path.hash)).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_bytes(&headers, strategy, &uri, move |q| q.block_raw(&path.hash)).await
},
|op| {
op.id("get_block_raw")
@@ -157,7 +161,7 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_status(&path.hash)).await
state.cached_json(&headers, state.block_status_cache(Version::ONE, &path.hash), &uri, move |q| q.block_status(&path.hash)).await
},
|op| {
op.id("get_block_status")
@@ -178,7 +182,7 @@ impl BlockRoutes for ApiRouter<AppState> {
"/api/blocks/tip/height",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| Ok(q.height().to_string())).await
state.cached_text(&headers, CacheStrategy::Tip, &uri, |q| Ok(q.height().to_string())).await
},
|op| {
op.id("get_block_tip_height")
@@ -195,7 +199,7 @@ impl BlockRoutes for ApiRouter<AppState> {
"/api/blocks/tip/hash",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| q.block_hash_by_height(q.height()).map(|h| h.to_string())).await
state.cached_text(&headers, CacheStrategy::Tip, &uri, |q| q.block_hash_by_height(q.height()).map(|h| h.to_string())).await
},
|op| {
op.id("get_block_tip_hash")
@@ -215,7 +219,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashTxIndex>,
State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Static, &uri, move |q| q.block_txid_at_index(&path.hash, path.index).map(|t| t.to_string())).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_text(&headers, strategy, &uri, move |q| q.block_txid_at_index(&path.hash, path.index).map(|t| t.to_string())).await
},
|op| {
op.id("get_block_txid")
@@ -239,7 +244,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| q.block_txids(&path.hash)).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_json(&headers, strategy, &uri, move |q| q.block_txids(&path.hash)).await
},
|op| {
op.id("get_block_txids")
@@ -263,7 +269,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| q.block_txs(&path.hash, TxIndex::default())).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_json(&headers, strategy, &uri, move |q| q.block_txs(&path.hash, TxIndex::default())).await
},
|op| {
op.id("get_block_txs")
@@ -288,7 +295,8 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<BlockHashStartIndex>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Static, &uri, move |q| q.block_txs(&path.hash, path.start_index)).await
let strategy = state.block_cache(Version::ONE, &path.hash);
state.cached_json(&headers, strategy, &uri, move |q| q.block_txs(&path.hash, path.start_index)).await
},
|op| {
op.id("get_block_txs_from_index")
@@ -311,7 +319,7 @@ impl BlockRoutes for ApiRouter<AppState> {
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks(None))
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.blocks(None))
.await
},
|op| {
@@ -332,7 +340,7 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks(Some(path.height))).await
state.cached_json(&headers, state.height_cache(Version::ONE, path.height), &uri, move |q| q.blocks(Some(path.height))).await
},
|op| {
op.id("get_blocks_from_height")
@@ -353,7 +361,7 @@ impl BlockRoutes for ApiRouter<AppState> {
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(None))
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.blocks_v1(None))
.await
},
|op| {
@@ -374,7 +382,7 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(Some(path.height))).await
state.cached_json(&headers, state.height_cache(Version::ONE, path.height), &uri, move |q| q.blocks_v1(Some(path.height))).await
},
|op| {
op.id("get_blocks_v1_from_height")

View File

@@ -18,7 +18,7 @@ impl GeneralRoutes for ApiRouter<AppState> {
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, |q| {
q.difficulty_adjustment()
})
.await
@@ -65,7 +65,7 @@ impl GeneralRoutes for ApiRouter<AppState> {
Query(params): Query<OptionalTimestampParam>,
State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.historical_price(params.timestamp)
})
.await

View File

@@ -45,7 +45,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/pools/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.mining_pools(path.time_period)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.mining_pools(path.time_period)).await
},
|op| {
op.id("get_pool_stats")
@@ -62,7 +62,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/pool/{slug}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_detail(path.slug)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.pool_detail(path.slug)).await
},
|op| {
op.id("get_pool")
@@ -80,7 +80,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/hashrate/pools",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, |q| q.pools_hashrate(None)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, |q| q.pools_hashrate(None)).await
},
|op| {
op.id("get_pools_hashrate")
@@ -97,7 +97,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/hashrate/pools/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pools_hashrate(Some(path.time_period))).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.pools_hashrate(Some(path.time_period))).await
},
|op| {
op.id("get_pools_hashrate_by_period")
@@ -114,7 +114,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/pool/{slug}/hashrate",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_hashrate(path.slug)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.pool_hashrate(path.slug)).await
},
|op| {
op.id("get_pool_hashrate")
@@ -132,7 +132,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/pool/{slug}/blocks",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(path.slug, None)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.pool_blocks(path.slug, None)).await
},
|op| {
op.id("get_pool_blocks")
@@ -150,7 +150,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/pool/{slug}/blocks/{height}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(PoolSlugAndHeightParam {slug, height}): Path<PoolSlugAndHeightParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(slug, Some(height))).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.pool_blocks(slug, Some(height))).await
},
|op| {
op.id("get_pool_blocks_from")
@@ -168,7 +168,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/hashrate",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, |q| q.hashrate(None)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, |q| q.hashrate(None)).await
},
|op| {
op.id("get_hashrate")
@@ -185,7 +185,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/hashrate/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.hashrate(Some(path.time_period))).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.hashrate(Some(path.time_period))).await
},
|op| {
op.id("get_hashrate_by_period")
@@ -202,7 +202,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/difficulty-adjustments",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, |q| q.difficulty_adjustments(None)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, |q| q.difficulty_adjustments(None)).await
},
|op| {
op.id("get_difficulty_adjustments")
@@ -219,7 +219,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/difficulty-adjustments/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.difficulty_adjustments(Some(path.time_period))).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.difficulty_adjustments(Some(path.time_period))).await
},
|op| {
op.id("get_difficulty_adjustments_by_period")
@@ -236,7 +236,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/reward-stats/{block_count}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockCountParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.reward_stats(path.block_count)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.reward_stats(path.block_count)).await
},
|op| {
op.id("get_reward_stats")
@@ -253,7 +253,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/blocks/fees/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_fees(path.time_period)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.block_fees(path.time_period)).await
},
|op| {
op.id("get_block_fees")
@@ -270,7 +270,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/blocks/rewards/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_rewards(path.time_period)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.block_rewards(path.time_period)).await
},
|op| {
op.id("get_block_rewards")
@@ -302,7 +302,7 @@ impl MiningRoutes for ApiRouter<AppState> {
"/api/v1/mining/blocks/sizes-weights/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_sizes_weights(path.time_period)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.block_sizes_weights(path.time_period)).await
},
|op| {
op.id("get_block_sizes_weights")

View File

@@ -7,7 +7,8 @@ use axum::{
http::{HeaderMap, Uri},
};
use brk_types::{
CpfpInfo, MerkleProof, Transaction, TxOutspend, TxStatus, Txid, TxidParam, TxidVout, TxidsParam,
CpfpInfo, MerkleProof, Transaction, TxOutspend, TxStatus, Txid, TxidParam, TxidVout,
TxidsParam, Version,
};
use crate::{AppState, CacheStrategy, extended::TransformResponseExtended};
@@ -22,8 +23,8 @@ impl TxRoutes for ApiRouter<AppState> {
.api_route(
"/api/v1/cpfp/{txid}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, move |q| q.cpfp(txid)).await
async |uri: Uri, headers: HeaderMap, Path(param): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, state.tx_cache(Version::ONE, &param.txid), &uri, move |q| q.cpfp(param)).await
},
|op| op
.id("get_cpfp")
@@ -41,10 +42,10 @@ impl TxRoutes for ApiRouter<AppState> {
async |
uri: Uri,
headers: HeaderMap,
Path(txid): Path<TxidParam>,
Path(param): Path<TxidParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.transaction(txid)).await
state.cached_json(&headers, state.tx_cache(Version::ONE, &param.txid), &uri, move |q| q.transaction(param)).await
},
|op| op
.id("get_tx")
@@ -69,7 +70,7 @@ impl TxRoutes for ApiRouter<AppState> {
Path(txid): Path<TxidParam>,
State(state): State<AppState>
| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.transaction_hex(txid)).await
state.cached_text(&headers, state.tx_cache(Version::ONE, &txid.txid), &uri, move |q| q.transaction_hex(txid)).await
},
|op| op
.id("get_tx_hex")
@@ -89,7 +90,7 @@ impl TxRoutes for ApiRouter<AppState> {
"/api/tx/{txid}/merkleblock-proof",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.merkleblock_proof(txid)).await
state.cached_text(&headers, state.tx_cache(Version::ONE, &txid.txid), &uri, move |q| q.merkleblock_proof(txid)).await
},
|op| op
.id("get_tx_merkleblock_proof")
@@ -107,7 +108,7 @@ impl TxRoutes for ApiRouter<AppState> {
"/api/tx/{txid}/merkle-proof",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.merkle_proof(txid)).await
state.cached_json(&headers, state.tx_cache(Version::ONE, &txid.txid), &uri, move |q| q.merkle_proof(txid)).await
},
|op| op
.id("get_tx_merkle_proof")
@@ -131,7 +132,7 @@ impl TxRoutes for ApiRouter<AppState> {
State(state): State<AppState>
| {
let txid = TxidParam { txid: path.txid };
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.outspend(txid, path.vout)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.outspend(txid, path.vout)).await
},
|op| op
.id("get_tx_outspend")
@@ -156,7 +157,7 @@ impl TxRoutes for ApiRouter<AppState> {
Path(txid): Path<TxidParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.outspends(txid)).await
state.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| q.outspends(txid)).await
},
|op| op
.id("get_tx_outspends")
@@ -176,7 +177,7 @@ impl TxRoutes for ApiRouter<AppState> {
"/api/tx/{txid}/raw",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_bytes(&headers, CacheStrategy::Height, &uri, move |q| q.transaction_raw(txid)).await
state.cached_bytes(&headers, state.tx_cache(Version::ONE, &txid.txid), &uri, move |q| q.transaction_raw(txid)).await
},
|op| op
.id("get_tx_raw")
@@ -196,10 +197,10 @@ impl TxRoutes for ApiRouter<AppState> {
async |
uri: Uri,
headers: HeaderMap,
Path(txid): Path<TxidParam>,
Path(param): Path<TxidParam>,
State(state): State<AppState>
| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.transaction_status(txid)).await
state.cached_json(&headers, state.tx_cache(Version::ONE, &param.txid), &uri, move |q| q.transaction_status(param)).await
},
|op| op
.id("get_tx_status")

View File

@@ -275,7 +275,7 @@ impl ApiMetricsLegacyRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<LegacySeriesWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.latest(&path.metric, path.index)
})
.await
@@ -301,7 +301,7 @@ impl ApiMetricsLegacyRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<LegacySeriesWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.len(&path.metric, path.index)
})
.await
@@ -327,7 +327,7 @@ impl ApiMetricsLegacyRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<LegacySeriesWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.version(&path.metric, path.index)
})
.await
@@ -376,7 +376,7 @@ impl ApiMetricsLegacyRoutes for ApiRouter<AppState> {
Path(params): Path<CostBasisCohortParam>,
State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.cost_basis_dates(&params.cohort)
})
.await

View File

@@ -246,7 +246,7 @@ impl ApiSeriesRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<SeriesNameWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.latest(&path.series, path.index)
})
.await
@@ -270,7 +270,7 @@ impl ApiSeriesRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<SeriesNameWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.len(&path.series, path.index)
})
.await
@@ -292,7 +292,7 @@ impl ApiSeriesRoutes for ApiRouter<AppState> {
State(state): State<AppState>,
Path(path): Path<SeriesNameWithIndex>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.version(&path.series, path.index)
})
.await
@@ -352,7 +352,7 @@ impl ApiSeriesRoutes for ApiRouter<AppState> {
Path(params): Path<CostBasisCohortParam>,
State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
q.cost_basis_dates(&params.cohort)
})
.await

View File

@@ -77,7 +77,7 @@ impl ServerRoutes for ApiRouter<AppState> {
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
let tip_height = q.client().get_last_height()?;
Ok(q.sync_status(tip_height))
})
@@ -102,7 +102,7 @@ impl ServerRoutes for ApiRouter<AppState> {
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
let brk_path = state.data_path.clone();
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
.cached_json(&headers, CacheStrategy::Tip, &uri, move |q| {
let brk_bytes = dir_size(&brk_path)?;
let bitcoin_bytes = dir_size(q.blocks_dir())?;
Ok(DiskUsage::new(brk_bytes, bitcoin_bytes))

View File

@@ -1,19 +1,28 @@
use axum::http::HeaderMap;
use brk_types::{BlockHashPrefix, Version};
use crate::{VERSION, extended::HeaderMapExtended};
/// Cache strategy for HTTP responses.
pub enum CacheStrategy {
/// Data that changes with each new block (addresses, mining stats, txs, outspends)
/// Etag = VERSION-{height}, Cache-Control: must-revalidate
Height,
/// Chain-dependent data (addresses, mining stats, txs, outspends).
/// Etag = {tip_hash_prefix:x}. Invalidates on any tip change including reorgs.
Tip,
/// Static/immutable data (blocks by hash, validate-address, series catalog)
/// Etag = VERSION only, Cache-Control: must-revalidate
/// Immutable data identified by hash in the URL (blocks by hash, confirmed tx data).
/// Etag = {version}. Permanent; only bumped when response format changes.
Immutable(Version),
/// Static non-chain data (validate-address, series catalog, pool list).
/// Etag = CARGO_PKG_VERSION. Invalidates on deploy.
Static,
/// Mempool data - etag from next projected block hash + short max-age
/// Etag = VERSION-m{hash:x}, Cache-Control: max-age=1, must-revalidate
/// Immutable data bound to a specific block (confirmed tx data, block status).
/// Etag = {version}-{block_hash_prefix:x}. Invalidates naturally on reorg.
BlockBound(Version, BlockHashPrefix),
/// Mempool data — etag from next projected block hash.
/// Etag = m{hash:x}. Invalidates on mempool change.
MempoolHash(u64),
}
@@ -24,9 +33,12 @@ pub struct CacheParams {
}
impl CacheParams {
/// Cache params using VERSION as etag
pub fn version() -> Self {
Self::resolve(&CacheStrategy::Static, || unreachable!())
/// Cache params using CARGO_PKG_VERSION as etag (for openapi.json etc.)
pub fn static_version() -> Self {
Self {
etag: Some(format!("s{VERSION}")),
cache_control: "public, max-age=1, must-revalidate".into(),
}
}
pub fn etag_str(&self) -> &str {
@@ -39,20 +51,28 @@ impl CacheParams {
.is_some_and(|etag| headers.has_etag(etag))
}
pub fn resolve(strategy: &CacheStrategy, height: impl FnOnce() -> u32) -> Self {
use CacheStrategy::*;
pub fn resolve(strategy: &CacheStrategy, tip: impl FnOnce() -> BlockHashPrefix) -> Self {
let cache_control = "public, max-age=1, must-revalidate".into();
match strategy {
Height => Self {
etag: Some(format!("{VERSION}-{}", height())),
cache_control: "public, max-age=1, must-revalidate".into(),
CacheStrategy::Tip => Self {
etag: Some(format!("t{:x}", *tip())),
cache_control,
},
Static => Self {
etag: Some(VERSION.to_string()),
cache_control: "public, max-age=1, must-revalidate".into(),
CacheStrategy::Immutable(v) => Self {
etag: Some(format!("i{v}")),
cache_control,
},
MempoolHash(hash) => Self {
etag: Some(format!("{VERSION}-m{hash:x}")),
cache_control: "public, max-age=1, must-revalidate".into(),
CacheStrategy::BlockBound(v, prefix) => Self {
etag: Some(format!("b{v}-{:x}", **prefix)),
cache_control,
},
CacheStrategy::Static => Self {
etag: Some(format!("s{VERSION}")),
cache_control,
},
CacheStrategy::MempoolHash(hash) => Self {
etag: Some(format!("m{hash:x}")),
cache_control,
},
}
}

View File

@@ -47,7 +47,7 @@ impl ResponseExtended for Response<Body> {
where
T: Serialize,
{
let params = CacheParams::version();
let params = CacheParams::static_version();
if params.matches_etag(headers) {
return Self::new_not_modified();
}

View File

@@ -5,13 +5,13 @@ use std::{
time::{Duration, Instant},
};
use derive_more::Deref;
use axum::{
body::{Body, Bytes},
http::{HeaderMap, HeaderValue, Response, Uri, header},
};
use brk_query::AsyncQuery;
use brk_types::{Addr, BlockHash, BlockHashPrefix, Height, Txid, Version};
use derive_more::Deref;
use jiff::Timestamp;
use quick_cache::sync::{Cache, GuardResult};
use serde::Serialize;
@@ -33,6 +33,80 @@ pub struct AppState {
}
impl AppState {
/// `Immutable` if height is >6 deep, `Tip` otherwise.
pub fn height_cache(&self, version: Version, height: Height) -> CacheStrategy {
    // Depth of `height` below the current tip; saturates to 0 for
    // heights at or above the tip, which keeps them on the `Tip` path.
    let depth = self.sync(|q| (*q.height()).saturating_sub(*height));
    if depth > 6 {
        CacheStrategy::Immutable(version)
    } else {
        CacheStrategy::Tip
    }
}
/// Smart address caching: checks mempool activity first, then on-chain.
/// - Address has mempool txs → `MempoolHash(addr_specific_hash)`
/// - No mempool, has on-chain activity → `BlockBound(last_activity_block)`
/// - Unknown address → `Tip`
pub fn addr_cache(&self, version: Version, addr: &Addr) -> CacheStrategy {
    self.sync(|q| {
        // A non-zero address-specific hash signals pending mempool activity.
        match q.addr_mempool_hash(addr) {
            0 => (),
            hash => return CacheStrategy::MempoolHash(hash),
        }
        // Otherwise bind the cache to the block that contains the address's
        // most recent on-chain activity; any lookup failure falls back to `Tip`.
        let bound = q.addr_last_activity_height(addr).and_then(|h| {
            let block_hash = q.block_hash_by_height(h)?;
            Ok(CacheStrategy::BlockBound(
                version,
                BlockHashPrefix::from(&block_hash),
            ))
        });
        bound.unwrap_or(CacheStrategy::Tip)
    })
}
/// `Immutable` if the block is >6 deep (status stable), `Tip` otherwise.
/// For block status which changes when the next block arrives.
pub fn block_status_cache(&self, version: Version, hash: &BlockHash) -> CacheStrategy {
    self.sync(|q| match q.height_by_hash(hash) {
        // Buried more than 6 blocks below the tip: status is effectively final.
        Ok(h) if (*q.height()).saturating_sub(*h) > 6 => CacheStrategy::Immutable(version),
        // Shallow block, or hash not found: revalidate on every tip change.
        _ => CacheStrategy::Tip,
    })
}
/// `BlockBound` if the block exists (reorg-safe via block hash), `Tip` if not found.
pub fn block_cache(&self, version: Version, hash: &BlockHash) -> CacheStrategy {
    self.sync(|q| match q.height_by_hash(hash) {
        // Known block: key the cache on its hash prefix so a reorg that
        // replaces the block naturally produces a different etag.
        Ok(_) => CacheStrategy::BlockBound(version, BlockHashPrefix::from(hash)),
        // Unknown hash: fall back to tip-based invalidation.
        Err(_) => CacheStrategy::Tip,
    })
}
/// Mempool → `MempoolHash`, confirmed → `BlockBound`, unknown → `Tip`.
pub fn tx_cache(&self, version: Version, txid: &Txid) -> CacheStrategy {
    self.sync(|q| {
        // Single mempool lookup (the original fetched it twice, with a dead
        // `unwrap_or(0)` on the second fetch that could never fire inside the
        // branch where the mempool was already known to be `Some`). If the tx
        // is pending, key the cache on the projected next block hash so any
        // mempool change invalidates it.
        if let Some(mempool) = q.mempool()
            && mempool.get_txs().contains(txid)
        {
            return CacheStrategy::MempoolHash(mempool.next_block_hash());
        }
        // Confirmed: bind to the containing block's hash prefix (reorg-safe —
        // a reorg changes the hash and therefore the etag).
        if let Ok((_, height)) = q.resolve_tx(txid)
            && let Ok(block_hash) = q.block_hash_by_height(height)
        {
            return CacheStrategy::BlockBound(version, BlockHashPrefix::from(&block_hash));
        }
        // Neither in mempool nor resolvable on-chain.
        CacheStrategy::Tip
    })
}
pub fn mempool_cache(&self) -> CacheStrategy {
let hash = self.sync(|q| q.mempool().map(|m| m.next_block_hash()).unwrap_or(0));
CacheStrategy::MempoolHash(hash)
@@ -51,7 +125,7 @@ impl AppState {
F: FnOnce(&brk_query::Query, ContentEncoding) -> brk_error::Result<Bytes> + Send + 'static,
{
let encoding = ContentEncoding::negotiate(headers);
let params = CacheParams::resolve(&strategy, || self.sync(|q| q.height().into()));
let params = CacheParams::resolve(&strategy, || self.sync(|q| q.tip_hash_prefix()));
if params.matches_etag(headers) {
return ResponseExtended::new_not_modified();
}

File diff suppressed because it is too large Load Diff

View File

@@ -35,11 +35,19 @@ pub struct AddrValidation {
/// Witness program in hex
#[serde(skip_serializing_if = "Option::is_none")]
pub witness_program: Option<String>,
/// Error locations (empty array for most errors)
#[serde(skip_serializing_if = "Option::is_none")]
pub error_locations: Option<Vec<usize>>,
/// Error message for invalid addresses
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
impl AddrValidation {
/// Returns an invalid validation result
pub fn invalid() -> Self {
/// Returns an invalid validation result with error detail
pub fn invalid(error: String) -> Self {
Self {
isvalid: false,
addr: None,
@@ -48,13 +56,16 @@ impl AddrValidation {
iswitness: None,
witness_version: None,
witness_program: None,
error_locations: Some(vec![]),
error: Some(error),
}
}
/// Validate a Bitcoin address string and return details
pub fn from_addr(addr: &str) -> Self {
let Ok(script) = AddrBytes::addr_to_script(addr) else {
return Self::invalid();
let script = match AddrBytes::addr_to_script(addr) {
Ok(s) => s,
Err(e) => return Self::invalid(e.to_string()),
};
let output_type = OutputType::from(&script);
@@ -86,6 +97,8 @@ impl AddrValidation {
iswitness: Some(is_witness),
witness_version,
witness_program,
error_locations: None,
error: None,
}
}
}

View File

@@ -1,7 +1,7 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Height, Sats, Timestamp};
use crate::{Dollars, Height, Sats, Timestamp};
/// A single block fees data point.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
@@ -10,4 +10,7 @@ pub struct BlockFeesEntry {
pub avg_height: Height,
pub timestamp: Timestamp,
pub avg_fees: Sats,
/// BTC/USD price at that height
#[serde(rename = "USD")]
pub usd: Dollars,
}

View File

@@ -19,9 +19,6 @@ pub struct BlockHeader {
/// Merkle root of the transaction tree
pub merkle_root: String,
/// Block timestamp as claimed by the miner (Unix time)
pub time: u32,
/// Compact target (bits)
pub bits: u32,
@@ -35,7 +32,6 @@ impl From<Header> for BlockHeader {
version: h.version.to_consensus() as u32,
previous_block_hash: BlockHash::from(h.prev_blockhash),
merkle_root: h.merkle_root.to_string(),
time: h.time,
bits: h.bits.to_consensus(),
nonce: h.nonce,
}

View File

@@ -1,37 +1,37 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockHash, BlockHeader, Height, Timestamp, Weight};
use crate::{BlockHash, Height, Timestamp, Weight};
/// Block information matching mempool.space /api/block/{hash}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockInfo {
/// Block hash
pub id: BlockHash,
/// Block height
pub height: Height,
/// Block header fields
#[serde(flatten)]
pub header: BlockHeader,
/// Block version
pub version: u32,
/// Block timestamp (Unix time)
pub timestamp: Timestamp,
/// Number of transactions in the block
/// Number of transactions
pub tx_count: u32,
/// Block size in bytes
pub size: u64,
/// Block weight in weight units
pub weight: Weight,
/// Merkle root of the transaction tree
pub merkle_root: String,
/// Previous block hash
#[serde(rename = "previousblockhash")]
pub previous_block_hash: BlockHash,
/// Median time of the last 11 blocks
#[serde(rename = "mediantime")]
pub median_time: Timestamp,
/// Nonce
pub nonce: u32,
/// Compact target (bits)
pub bits: u32,
/// Block difficulty
pub difficulty: f64,
}

View File

@@ -5,6 +5,7 @@ use crate::PoolSlug;
/// Mining pool identification for a block
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockPool {
/// Unique pool identifier
pub id: u8,
@@ -14,4 +15,7 @@ pub struct BlockPool {
/// URL-friendly pool identifier
pub slug: PoolSlug,
/// Alternative miner names (if identified)
pub miner_names: Option<String>,
}

View File

@@ -1,11 +1,16 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Dollars, Height, Sats, Timestamp};
/// A single block rewards data point.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockRewardsEntry {
pub avg_height: u32,
pub timestamp: u32,
pub avg_rewards: u64,
pub avg_height: Height,
pub timestamp: Timestamp,
pub avg_rewards: Sats,
/// BTC/USD price at that height
#[serde(rename = "USD")]
pub usd: Dollars,
}

View File

@@ -1,11 +1,13 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Height, Timestamp};
/// A single block size data point.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockSizeEntry {
pub avg_height: u32,
pub timestamp: u32,
pub avg_height: Height,
pub timestamp: Timestamp,
pub avg_size: u64,
}

View File

@@ -13,8 +13,7 @@ pub struct BlockStatus {
#[serde(skip_serializing_if = "Option::is_none")]
pub height: Option<Height>,
/// Hash of the next block in the best chain (only if in best chain and not tip)
#[serde(skip_serializing_if = "Option::is_none")]
/// Hash of the next block in the best chain (null if tip)
pub next_best: Option<BlockHash>,
}

View File

@@ -1,11 +1,13 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Height, Timestamp, Weight};
/// A single block weight data point.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockWeightEntry {
pub avg_height: u32,
pub timestamp: u32,
pub avg_weight: u64,
pub avg_height: Height,
pub timestamp: Timestamp,
pub avg_weight: Weight,
}

View File

@@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize, Serializer, de};
use vecdb::{Bytes, Formattable};
/// Block hash
#[derive(Debug, Deref, Clone, PartialEq, Eq, Bytes, JsonSchema)]
#[derive(Default, Debug, Deref, Clone, PartialEq, Eq, Bytes, JsonSchema)]
#[repr(C)]
#[schemars(
transparent,

View File

@@ -29,6 +29,13 @@ impl From<ByteView> for BlockHashPrefix {
}
}
impl From<u64> for BlockHashPrefix {
    /// Wraps a raw `u64` as a `BlockHashPrefix` verbatim — no validation
    /// or byte reordering is performed on the value.
    #[inline]
    fn from(value: u64) -> Self {
        Self(value)
    }
}
impl From<BlockHashPrefix> for ByteView {
#[inline]
fn from(value: BlockHashPrefix) -> Self {

View File

@@ -1,21 +1,33 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{FeeRate, Sats, Txid, Weight};
use crate::{FeeRate, Sats, Txid, VSize, Weight};
/// CPFP (Child Pays For Parent) information for a transaction
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct CpfpInfo {
/// Ancestor transactions in the CPFP chain
pub ancestors: Vec<CpfpEntry>,
/// Best (highest fee rate) descendant, if any
pub best_descendant: Option<CpfpEntry>,
/// Descendant transactions in the CPFP chain
pub descendants: Vec<CpfpEntry>,
#[serde(rename = "effectiveFeePerVsize")]
/// Effective fee rate considering CPFP relationships (sat/vB)
pub effective_fee_per_vsize: FeeRate,
/// Transaction fee (sats)
pub fee: Sats,
/// Adjusted virtual size (accounting for sigops)
pub adjusted_vsize: VSize,
}
/// A transaction in a CPFP relationship
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct CpfpEntry {
/// Transaction ID
pub txid: Txid,
/// Transaction weight
pub weight: Weight,
/// Transaction fee (sats)
pub fee: Sats,
}

View File

@@ -1,7 +1,7 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::Height;
use crate::{Height, Timestamp};
/// Difficulty adjustment information.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
@@ -15,35 +15,43 @@ pub struct DifficultyAdjustment {
#[schemars(example = 2.5)]
pub difficulty_change: f64,
/// Estimated Unix timestamp of next retarget
#[schemars(example = 1627762478)]
/// Estimated timestamp of next retarget (milliseconds)
#[schemars(example = 1627762478000_u64)]
pub estimated_retarget_date: u64,
/// Blocks remaining until retarget
#[schemars(example = 1121)]
pub remaining_blocks: u32,
/// Estimated seconds until retarget
#[schemars(example = 665977)]
/// Estimated time until retarget (milliseconds)
#[schemars(example = 665977000_u64)]
pub remaining_time: u64,
/// Previous difficulty adjustment (%)
#[schemars(example = -4.8)]
pub previous_retarget: f64,
/// Timestamp of most recent retarget (seconds)
#[schemars(example = 1627000000_u64)]
pub previous_time: Timestamp,
/// Height of next retarget
#[schemars(example = 741888)]
pub next_retarget_height: Height,
/// Average block time in current epoch (seconds)
#[schemars(example = 580)]
/// Average block time in current epoch (milliseconds)
#[schemars(example = 580000_u64)]
pub time_avg: u64,
/// Time-adjusted average (accounting for timestamp manipulation)
#[schemars(example = 580)]
/// Time-adjusted average (milliseconds)
#[schemars(example = 580000_u64)]
pub adjusted_time_avg: u64,
/// Time offset from expected schedule (seconds)
#[schemars(example = 0)]
pub time_offset: i64,
/// Expected blocks based on wall clock time since epoch start
#[schemars(example = 1827.21)]
pub expected_blocks: f64,
}

View File

@@ -3,13 +3,15 @@ use serde::{Deserialize, Serialize};
use super::{Height, Timestamp};
/// A single difficulty data point.
/// A single difficulty data point in the hashrate summary.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct DifficultyEntry {
/// Unix timestamp of the difficulty adjustment.
pub timestamp: Timestamp,
/// Difficulty value.
pub difficulty: f64,
/// Block height of the adjustment.
/// Unix timestamp of the difficulty adjustment
pub time: Timestamp,
/// Block height of the adjustment
pub height: Height,
/// Difficulty value
pub difficulty: f64,
/// Adjustment ratio (new/previous, e.g. 1.068 = +6.8%)
pub adjustment: f64,
}

View File

@@ -7,7 +7,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use vecdb::{CheckedSub, Formattable, Pco};
use super::{Sats, VSize};
use super::{Sats, VSize, Weight};
/// Fee rate in sats/vB
#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize, Pco, JsonSchema)]
@@ -36,6 +36,13 @@ impl From<(Sats, VSize)> for FeeRate {
}
}
impl From<(Sats, Weight)> for FeeRate {
#[inline]
fn from((sats, weight): (Sats, Weight)) -> Self {
Self::from((sats, VSize::from(weight.to_vbytes_ceil())))
}
}
impl From<f64> for FeeRate {
#[inline]
fn from(value: f64) -> Self {

View File

@@ -31,7 +31,7 @@ pub struct PoolDetail {
/// Pool information for detail view
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PoolDetailInfo {
/// Unique pool identifier
/// Pool identifier
pub id: u8,
/// Pool name
@@ -41,13 +41,16 @@ pub struct PoolDetailInfo {
pub link: Cow<'static, str>,
/// Known payout addresses
pub addrs: Vec<Cow<'static, str>>,
pub addresses: Vec<Cow<'static, str>>,
/// Coinbase tag patterns (regexes)
pub regexes: Vec<Cow<'static, str>>,
/// URL-friendly pool identifier
pub slug: PoolSlug,
/// Unique pool identifier
pub unique_id: u8,
}
impl From<&'static Pool> for PoolDetailInfo {
@@ -56,9 +59,10 @@ impl From<&'static Pool> for PoolDetailInfo {
id: pool.unique_id(),
name: Cow::Borrowed(pool.name),
link: Cow::Borrowed(pool.link),
addrs: pool.addrs.iter().map(|&s| Cow::Borrowed(s)).collect(),
addresses: pool.addrs.iter().map(|&s| Cow::Borrowed(s)).collect(),
regexes: pool.tags.iter().map(|&s| Cow::Borrowed(s)).collect(),
slug: pool.slug(),
unique_id: pool.unique_id(),
}
}
}

View File

@@ -198,10 +198,8 @@ pub enum PoolSlug {
Parasite,
RedRockPool,
Est3lar,
#[serde(skip)]
Dummy168,
#[serde(skip)]
Dummy169,
BraiinsSolo,
SoloPool,
#[serde(skip)]
Dummy170,
#[serde(skip)]

View File

@@ -34,20 +34,26 @@ pub struct PoolStats {
/// Pool's share of total blocks (0.0 - 1.0)
pub share: f64,
/// Unique pool identifier
#[serde(rename = "poolUniqueId")]
pub pool_unique_id: u8,
}
impl PoolStats {
/// Create a new PoolStats from a Pool reference
pub fn new(pool: &'static Pool, block_count: u64, rank: u32, share: f64) -> Self {
let id = pool.unique_id();
Self {
pool_id: pool.unique_id(),
pool_id: id,
name: Cow::Borrowed(pool.name),
link: Cow::Borrowed(pool.link),
block_count,
rank,
empty_blocks: 0, // TODO: track empty blocks if needed
empty_blocks: 0,
slug: pool.slug(),
share,
pool_unique_id: id,
}
}
}

View File

@@ -7,7 +7,7 @@ use crate::PoolSlug;
use super::Pool;
const JSON_DATA: &str = include_str!("../pools-v2.json");
const POOL_COUNT: usize = 168;
const POOL_COUNT: usize = 170;
const TESTNET_IDS: &[u16] = &[145, 146, 149, 150, 156, 163];
#[derive(Deserialize)]

View File

@@ -5,15 +5,16 @@ use crate::PoolStats;
/// Mining pools response for a time period
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct PoolsSummary {
/// List of pools sorted by block count descending
pub pools: Vec<PoolStats>,
/// Total blocks in the time period
#[serde(rename = "blockCount")]
pub block_count: u64,
/// Estimated network hashrate (hashes per second)
#[serde(rename = "lastEstimatedHashrate")]
pub last_estimated_hashrate: u128,
/// Estimated network hashrate over last 3 days
pub last_estimated_hashrate3d: u128,
/// Estimated network hashrate over last 1 week
pub last_estimated_hashrate1w: u128,
}

View File

@@ -21,6 +21,14 @@ pub struct Transaction {
#[serde(rename = "locktime")]
pub lock_time: RawLockTime,
/// Transaction inputs
#[serde(rename = "vin")]
pub input: Vec<TxIn>,
/// Transaction outputs
#[serde(rename = "vout")]
pub output: Vec<TxOut>,
/// Transaction size in bytes
#[schemars(example = 222)]
#[serde(rename = "size")]
@@ -39,14 +47,6 @@ pub struct Transaction {
#[schemars(example = Sats::new(31))]
pub fee: Sats,
/// Transaction inputs
#[serde(rename = "vin")]
pub input: Vec<TxIn>,
/// Transaction outputs
#[serde(rename = "vout")]
pub output: Vec<TxOut>,
pub status: TxStatus,
}

View File

@@ -10,6 +10,7 @@ pub struct TxIn {
#[schemars(example = "0000000000000000000000000000000000000000000000000000000000000000")]
pub txid: Txid,
/// Output index being spent
#[schemars(example = 0)]
pub vout: Vout,
@@ -17,55 +18,32 @@ pub struct TxIn {
#[schemars(example = None as Option<TxOut>)]
pub prevout: Option<TxOut>,
/// Signature script (for non-SegWit inputs)
#[schemars(
rename = "scriptsig",
with = "String",
example = "04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73"
)]
/// Signature script (hex, for non-SegWit inputs)
#[schemars(rename = "scriptsig", with = "String")]
pub script_sig: ScriptBuf,
/// Signature script in assembly format
#[schemars(
rename = "scriptsig_asm",
with = "String",
example = "OP_PUSHBYTES_4 ffff001d OP_PUSHBYTES_1 04 OP_PUSHBYTES_69 5468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73"
)]
#[schemars(rename = "scriptsig_asm", with = "String")]
pub script_sig_asm: (),
// /// Witness data (for SegWit inputs)
// #[schemars(example = vec!["3045022100d0c9936990bf00bdba15f425f0f360a223d5cbf81f4bf8477fe6c6d838fb5fae02207e42a8325a4dd41702bf065aa6e0a1b7b0b8ee92a5e6c182da018b0afc82c40601".to_string()])]
// pub witness: Vec<String>,
//
/// Witness data (hex-encoded stack items, present for SegWit inputs)
pub witness: Vec<String>,
/// Whether this input is a coinbase (block reward) input
#[schemars(example = false)]
pub is_coinbase: bool,
/// Input sequence number
#[schemars(example = 429496729)]
#[schemars(example = 4294967293_u32)]
pub sequence: u32,
/// Inner redeemscript in assembly format (for P2SH-wrapped SegWit)
#[allow(dead_code)]
#[schemars(
rename = "inner_redeemscript_asm",
with = "Option<String>",
example = Some("OP_0 OP_PUSHBYTES_20 992a1f7420fc5285070d19c71ff2efb1e356ad2f".to_string())
)]
/// Inner redeemscript in assembly (for P2SH-wrapped SegWit: scriptsig + witness both present)
#[schemars(rename = "inner_redeemscript_asm", with = "String")]
pub inner_redeem_script_asm: (),
}
impl TxIn {
pub fn script_sig_asm(&self) -> String {
self.script_sig.to_asm_string()
}
pub fn inner_redeemscript_asm(&self) -> String {
self.script_sig
.redeem_script()
.map(|s| s.to_asm_string())
.unwrap_or_default()
}
/// Inner witnessscript in assembly (for P2WSH: last witness item decoded as script)
#[schemars(rename = "inner_witnessscript_asm", with = "String")]
pub inner_witness_script_asm: (),
}
impl Serialize for TxIn {
@@ -73,16 +51,55 @@ impl Serialize for TxIn {
where
S: Serializer,
{
let mut state = serializer.serialize_struct("TxIn", 8)?;
let has_witness = !self.witness.is_empty();
let has_scriptsig = !self.script_sig.is_empty();
// P2SH-wrapped SegWit: both scriptsig and witness present
let inner_redeem = if has_scriptsig && has_witness {
self.script_sig
.redeem_script()
.map(|s| s.to_asm_string())
.unwrap_or_default()
} else {
String::new()
};
// P2WSH: witness has >2 items, last is the witnessScript
let inner_witness = if has_witness && !has_scriptsig && self.witness.len() > 2 {
if let Some(last) = self.witness.last() {
let bytes: Vec<u8> =
bitcoin::hex::FromHex::from_hex(last).unwrap_or_default();
ScriptBuf::from(bytes).to_asm_string()
} else {
String::new()
}
} else {
String::new()
};
let has_inner_redeem = !inner_redeem.is_empty();
let has_inner_witness = !inner_witness.is_empty();
let field_count =
7 + has_witness as usize + has_inner_redeem as usize + has_inner_witness as usize;
let mut state = serializer.serialize_struct("TxIn", field_count)?;
state.serialize_field("txid", &self.txid)?;
state.serialize_field("vout", &self.vout)?;
state.serialize_field("prevout", &self.prevout)?;
state.serialize_field("scriptsig", &self.script_sig.to_hex_string())?;
state.serialize_field("scriptsig_asm", &self.script_sig_asm())?;
state.serialize_field("scriptsig_asm", &self.script_sig.to_asm_string())?;
if has_witness {
state.serialize_field("witness", &self.witness)?;
}
state.serialize_field("is_coinbase", &self.is_coinbase)?;
state.serialize_field("sequence", &self.sequence)?;
state.serialize_field("inner_redeemscript_asm", &self.inner_redeemscript_asm())?;
if has_inner_redeem {
state.serialize_field("inner_redeemscript_asm", &inner_redeem)?;
}
if has_inner_witness {
state.serialize_field("inner_witnessscript_asm", &inner_witness)?;
}
state.end()
}

View File

@@ -76,6 +76,13 @@ impl From<VSize> for Weight {
}
}
impl From<u64> for Weight {
#[inline]
fn from(value: u64) -> Self {
Self(value)
}
}
impl From<usize> for Weight {
#[inline]
fn from(value: usize) -> Self {

View File

@@ -60,6 +60,8 @@
* @property {?boolean=} iswitness - Whether this is a witness address
* @property {?number=} witnessVersion - Witness version (0 for P2WPKH/P2WSH, 1 for P2TR)
* @property {?string=} witnessProgram - Witness program in hex
* @property {?number[]=} errorLocations - Error locations (empty array for most errors)
* @property {?string=} error - Error message for invalid addresses
*/
/**
* Unified index for any address type (funded or empty)
@@ -145,6 +147,7 @@
* @property {Height} avgHeight
* @property {Timestamp} timestamp
* @property {Sats} avgFees
* @property {Dollars} uSD - BTC/USD price at that height
*/
/**
* Block hash
@@ -171,17 +174,16 @@
* @typedef {Object} BlockInfo
* @property {BlockHash} id - Block hash
* @property {Height} height - Block height
* @property {number} version - Block version, used for soft fork signaling
* @property {BlockHash} previousblockhash - Previous block hash
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {number} time - Block timestamp as claimed by the miner (Unix time)
* @property {number} bits - Compact target (bits)
* @property {number} nonce - Nonce used to produce a valid block hash
* @property {number} version - Block version
* @property {Timestamp} timestamp - Block timestamp (Unix time)
* @property {number} txCount - Number of transactions in the block
* @property {number} txCount - Number of transactions
* @property {number} size - Block size in bytes
* @property {Weight} weight - Block weight in weight units
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {BlockHash} previousblockhash - Previous block hash
* @property {Timestamp} mediantime - Median time of the last 11 blocks
* @property {number} nonce - Nonce
* @property {number} bits - Compact target (bits)
* @property {number} difficulty - Block difficulty
*/
/**
@@ -190,18 +192,18 @@
* @typedef {Object} BlockInfoV1
* @property {BlockHash} id - Block hash
* @property {Height} height - Block height
* @property {number} version - Block version, used for soft fork signaling
* @property {BlockHash} previousblockhash - Previous block hash
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {number} time - Block timestamp as claimed by the miner (Unix time)
* @property {number} bits - Compact target (bits)
* @property {number} nonce - Nonce used to produce a valid block hash
* @property {number} version - Block version
* @property {Timestamp} timestamp - Block timestamp (Unix time)
* @property {number} txCount - Number of transactions in the block
* @property {number} txCount - Number of transactions
* @property {number} size - Block size in bytes
* @property {Weight} weight - Block weight in weight units
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {BlockHash} previousblockhash - Previous block hash
* @property {Timestamp} mediantime - Median time of the last 11 blocks
* @property {number} nonce - Nonce
* @property {number} bits - Compact target (bits)
* @property {number} difficulty - Block difficulty
* @property {boolean=} stale - Whether this block is stale (orphaned)
* @property {BlockExtras} extras - Extended block data
*/
/**
@@ -211,21 +213,23 @@
* @property {number} id - Unique pool identifier
* @property {string} name - Pool name
* @property {PoolSlug} slug - URL-friendly pool identifier
* @property {?string=} minerNames - Alternative miner names (if identified)
*/
/**
* A single block rewards data point.
*
* @typedef {Object} BlockRewardsEntry
* @property {number} avgHeight
* @property {number} timestamp
* @property {number} avgRewards
* @property {Height} avgHeight
* @property {Timestamp} timestamp
* @property {Sats} avgRewards
* @property {Dollars} uSD - BTC/USD price at that height
*/
/**
* A single block size data point.
*
* @typedef {Object} BlockSizeEntry
* @property {number} avgHeight
* @property {number} timestamp
* @property {Height} avgHeight
* @property {Timestamp} timestamp
* @property {number} avgSize
*/
/**
@@ -241,7 +245,7 @@
* @typedef {Object} BlockStatus
* @property {boolean} inBestChain - Whether this block is in the best chain
* @property {(Height|null)=} height - Block height (only if in best chain)
* @property {(BlockHash|null)=} nextBest - Hash of the next block in the best chain (only if in best chain and not tip)
* @property {(BlockHash|null)=} nextBest - Hash of the next block in the best chain (null if tip)
*/
/**
* Block information returned for timestamp queries
@@ -255,9 +259,9 @@
* A single block weight data point.
*
* @typedef {Object} BlockWeightEntry
* @property {number} avgHeight
* @property {number} timestamp
* @property {number} avgWeight
* @property {Height} avgHeight
* @property {Timestamp} timestamp
* @property {Weight} avgWeight
*/
/**
* Unsigned cents (u64) - for values that should never be negative.
@@ -342,17 +346,20 @@
* A transaction in a CPFP relationship
*
* @typedef {Object} CpfpEntry
* @property {Txid} txid
* @property {Weight} weight
* @property {Sats} fee
* @property {Txid} txid - Transaction ID
* @property {Weight} weight - Transaction weight
* @property {Sats} fee - Transaction fee (sats)
*/
/**
* CPFP (Child Pays For Parent) information for a transaction
*
* @typedef {Object} CpfpInfo
* @property {CpfpEntry[]} ancestors
* @property {CpfpEntry[]} descendants
* @property {FeeRate} effectiveFeePerVsize
* @property {CpfpEntry[]} ancestors - Ancestor transactions in the CPFP chain
* @property {(CpfpEntry|null)=} bestDescendant - Best (highest fee rate) descendant, if any
* @property {CpfpEntry[]} descendants - Descendant transactions in the CPFP chain
* @property {FeeRate} effectiveFeePerVsize - Effective fee rate considering CPFP relationships (sat/vB)
* @property {Sats} fee - Transaction fee (sats)
* @property {VSize} adjustedVsize - Adjusted virtual size (accounting for sigops)
*/
/**
* Data range with output format for API query parameters
@@ -386,14 +393,16 @@
* @typedef {Object} DifficultyAdjustment
* @property {number} progressPercent - Progress through current difficulty epoch (0-100%)
* @property {number} difficultyChange - Estimated difficulty change at next retarget (%)
* @property {number} estimatedRetargetDate - Estimated Unix timestamp of next retarget
* @property {number} estimatedRetargetDate - Estimated timestamp of next retarget (milliseconds)
* @property {number} remainingBlocks - Blocks remaining until retarget
* @property {number} remainingTime - Estimated seconds until retarget
* @property {number} remainingTime - Estimated time until retarget (milliseconds)
* @property {number} previousRetarget - Previous difficulty adjustment (%)
* @property {Timestamp} previousTime - Timestamp of most recent retarget (seconds)
* @property {Height} nextRetargetHeight - Height of next retarget
* @property {number} timeAvg - Average block time in current epoch (seconds)
* @property {number} adjustedTimeAvg - Time-adjusted average (accounting for timestamp manipulation)
* @property {number} timeAvg - Average block time in current epoch (milliseconds)
* @property {number} adjustedTimeAvg - Time-adjusted average (milliseconds)
* @property {number} timeOffset - Time offset from expected schedule (seconds)
* @property {number} expectedBlocks - Expected blocks based on wall clock time since epoch start
*/
/**
* A single difficulty adjustment entry.
@@ -406,12 +415,13 @@
* @property {number} changePercent
*/
/**
* A single difficulty data point.
* A single difficulty data point in the hashrate summary.
*
* @typedef {Object} DifficultyEntry
* @property {Timestamp} timestamp - Unix timestamp of the difficulty adjustment.
* @property {number} difficulty - Difficulty value.
* @property {Height} height - Block height of the adjustment.
* @property {Timestamp} time - Unix timestamp of the difficulty adjustment
* @property {Height} height - Block height of the adjustment
* @property {number} difficulty - Difficulty value
* @property {number} adjustment - Adjustment ratio (new/previous, e.g. 1.068 = +6.8%)
*/
/**
* Disk usage of the indexed data
@@ -729,12 +739,13 @@
* Pool information for detail view
*
* @typedef {Object} PoolDetailInfo
* @property {number} id - Unique pool identifier
* @property {number} id - Pool identifier
* @property {string} name - Pool name
* @property {string} link - Pool website URL
* @property {string[]} addrs - Known payout addresses
* @property {string[]} addresses - Known payout addresses
* @property {string[]} regexes - Coinbase tag patterns (regexes)
* @property {PoolSlug} slug - URL-friendly pool identifier
* @property {number} uniqueId - Unique pool identifier
*/
/**
* A single pool hashrate data point.
@@ -753,7 +764,7 @@
* @property {PoolSlug} slug - URL-friendly pool identifier
* @property {number} uniqueId - Unique numeric pool identifier
*/
/** @typedef {("unknown"|"blockfills"|"ultimuspool"|"terrapool"|"luxor"|"onethash"|"btccom"|"bitfarms"|"huobipool"|"wayicn"|"canoepool"|"btctop"|"bitcoincom"|"pool175btc"|"gbminers"|"axbt"|"asicminer"|"bitminter"|"bitcoinrussia"|"btcserv"|"simplecoinus"|"btcguild"|"eligius"|"ozcoin"|"eclipsemc"|"maxbtc"|"triplemining"|"coinlab"|"pool50btc"|"ghashio"|"stminingcorp"|"bitparking"|"mmpool"|"polmine"|"kncminer"|"bitalo"|"f2pool"|"hhtt"|"megabigpower"|"mtred"|"nmcbit"|"yourbtcnet"|"givemecoins"|"braiinspool"|"antpool"|"multicoinco"|"bcpoolio"|"cointerra"|"kanopool"|"solock"|"ckpool"|"nicehash"|"bitclub"|"bitcoinaffiliatenetwork"|"btcc"|"bwpool"|"exxbw"|"bitsolo"|"bitfury"|"twentyoneinc"|"digitalbtc"|"eightbaochi"|"mybtccoinpool"|"tbdice"|"hashpool"|"nexious"|"bravomining"|"hotpool"|"okexpool"|"bcmonster"|"onehash"|"bixin"|"tatmaspool"|"viabtc"|"connectbtc"|"batpool"|"waterhole"|"dcexploration"|"dcex"|"btpool"|"fiftyeightcoin"|"bitcoinindia"|"shawnp0wers"|"phashio"|"rigpool"|"haozhuzhu"|"sevenpool"|"miningkings"|"hashbx"|"dpool"|"rawpool"|"haominer"|"helix"|"bitcoinukraine"|"poolin"|"secretsuperstar"|"tigerpoolnet"|"sigmapoolcom"|"okpooltop"|"hummerpool"|"tangpool"|"bytepool"|"spiderpool"|"novablock"|"miningcity"|"binancepool"|"minerium"|"lubiancom"|"okkong"|"aaopool"|"emcdpool"|"foundryusa"|"sbicrypto"|"arkpool"|"purebtccom"|"marapool"|"kucoinpool"|"entrustcharitypool"|"okminer"|"titan"|"pegapool"|"btcnuggets"|"cloudhashing"|"digitalxmintsy"|"telco214"|"btcpoolparty"|"multipool"|"transactioncoinmining"|"btcdig"|"trickysbtcpool"|"btcmp"|"eobot"|"unomp"|"patels"|"gogreenlight"|"bitcoinindiapool"|"ekanembtc"|"canoe"|"tiger"|"onem1x"|"zulupool"|"secpool"|"ocean"|"whitepool"|"wiz"|"wk057"|"futurebitapollosolo"|"carbonnegative"|"portlandhodl"|"phoenix"|"neopool"|"maxipool"|"bitfufupool"|"gdpool"|"miningdutch"|"publicpool"|"miningsquared"|"innopolistech"|"btclab"|"parasite"|"redrockpool"|"est3lar")} PoolSlug */
/** @typedef {("unknown"|"blockfills"|"ultimuspool"|"terrapool"|"luxor"|"onethash"|"btccom"|"bitfarms"|"huobipool"|"wayicn"|"canoepool"|"btctop"|"bitcoincom"|"pool175btc"|"gbminers"|"axbt"|"asicminer"|"bitminter"|"bitcoinrussia"|"btcserv"|"simplecoinus"|"btcguild"|"eligius"|"ozcoin"|"eclipsemc"|"maxbtc"|"triplemining"|"coinlab"|"pool50btc"|"ghashio"|"stminingcorp"|"bitparking"|"mmpool"|"polmine"|"kncminer"|"bitalo"|"f2pool"|"hhtt"|"megabigpower"|"mtred"|"nmcbit"|"yourbtcnet"|"givemecoins"|"braiinspool"|"antpool"|"multicoinco"|"bcpoolio"|"cointerra"|"kanopool"|"solock"|"ckpool"|"nicehash"|"bitclub"|"bitcoinaffiliatenetwork"|"btcc"|"bwpool"|"exxbw"|"bitsolo"|"bitfury"|"twentyoneinc"|"digitalbtc"|"eightbaochi"|"mybtccoinpool"|"tbdice"|"hashpool"|"nexious"|"bravomining"|"hotpool"|"okexpool"|"bcmonster"|"onehash"|"bixin"|"tatmaspool"|"viabtc"|"connectbtc"|"batpool"|"waterhole"|"dcexploration"|"dcex"|"btpool"|"fiftyeightcoin"|"bitcoinindia"|"shawnp0wers"|"phashio"|"rigpool"|"haozhuzhu"|"sevenpool"|"miningkings"|"hashbx"|"dpool"|"rawpool"|"haominer"|"helix"|"bitcoinukraine"|"poolin"|"secretsuperstar"|"tigerpoolnet"|"sigmapoolcom"|"okpooltop"|"hummerpool"|"tangpool"|"bytepool"|"spiderpool"|"novablock"|"miningcity"|"binancepool"|"minerium"|"lubiancom"|"okkong"|"aaopool"|"emcdpool"|"foundryusa"|"sbicrypto"|"arkpool"|"purebtccom"|"marapool"|"kucoinpool"|"entrustcharitypool"|"okminer"|"titan"|"pegapool"|"btcnuggets"|"cloudhashing"|"digitalxmintsy"|"telco214"|"btcpoolparty"|"multipool"|"transactioncoinmining"|"btcdig"|"trickysbtcpool"|"btcmp"|"eobot"|"unomp"|"patels"|"gogreenlight"|"bitcoinindiapool"|"ekanembtc"|"canoe"|"tiger"|"onem1x"|"zulupool"|"secpool"|"ocean"|"whitepool"|"wiz"|"wk057"|"futurebitapollosolo"|"carbonnegative"|"portlandhodl"|"phoenix"|"neopool"|"maxipool"|"bitfufupool"|"gdpool"|"miningdutch"|"publicpool"|"miningsquared"|"innopolistech"|"btclab"|"parasite"|"redrockpool"|"est3lar"|"braiinssolo"|"solopool")} PoolSlug */
/**
* @typedef {Object} PoolSlugAndHeightParam
* @property {PoolSlug} slug
@@ -775,6 +786,7 @@
* @property {number} emptyBlocks - Number of empty blocks mined
* @property {PoolSlug} slug - URL-friendly pool identifier
* @property {number} share - Pool's share of total blocks (0.0 - 1.0)
* @property {number} poolUniqueId - Unique pool identifier
*/
/**
* Mining pools response for a time period
@@ -783,6 +795,8 @@
* @property {PoolStats[]} pools - List of pools sorted by block count descending
* @property {number} blockCount - Total blocks in the time period
* @property {number} lastEstimatedHashrate - Estimated network hashrate (hashes per second)
* @property {number} lastEstimatedHashrate3d - Estimated network hashrate over last 3 days
* @property {number} lastEstimatedHashrate1w - Estimated network hashrate over last 1 week
*/
/**
* Current price response matching mempool.space /api/v1/prices format
@@ -990,12 +1004,12 @@
* @property {Txid} txid
* @property {TxVersion} version
* @property {RawLockTime} locktime
* @property {TxIn[]} vin - Transaction inputs
* @property {TxOut[]} vout - Transaction outputs
* @property {number} size - Transaction size in bytes
* @property {Weight} weight - Transaction weight
* @property {number} sigops - Number of signature operations
* @property {Sats} fee - Transaction fee in satoshis
* @property {TxIn[]} vin - Transaction inputs
* @property {TxOut[]} vout - Transaction outputs
* @property {TxStatus} status
*/
/**
@@ -1008,13 +1022,15 @@
*
* @typedef {Object} TxIn
* @property {Txid} txid - Transaction ID of the output being spent
* @property {Vout} vout
* @property {Vout} vout - Output index being spent
* @property {(TxOut|null)=} prevout - Information about the previous output being spent
* @property {string} scriptsig - Signature script (for non-SegWit inputs)
* @property {string} scriptsig - Signature script (hex, for non-SegWit inputs)
* @property {string} scriptsigAsm - Signature script in assembly format
* @property {string[]} witness - Witness data (hex-encoded stack items, present for SegWit inputs)
* @property {boolean} isCoinbase - Whether this input is a coinbase (block reward) input
* @property {number} sequence - Input sequence number
* @property {?string=} innerRedeemscriptAsm - Inner redeemscript in assembly format (for P2SH-wrapped SegWit)
* @property {string} innerRedeemscriptAsm - Inner redeemscript in assembly (for P2SH-wrapped SegWit: scriptsig + witness both present)
* @property {string} innerWitnessscriptAsm - Inner witnessscript in assembly (for P2WSH: last witness item decoded as script)
*/
/** @typedef {number} TxInIndex */
/** @typedef {number} TxIndex */
@@ -5683,6 +5699,8 @@ function createTransferPattern(client, acc) {
* @property {BlocksDominancePattern} parasite
* @property {BlocksDominancePattern} redrockpool
* @property {BlocksDominancePattern} est3lar
* @property {BlocksDominancePattern} braiinssolo
* @property {BlocksDominancePattern} solopool
*/
/**
@@ -6746,7 +6764,9 @@ class BrkClient extends BrkClientBase {
"btclab": "BTCLab",
"parasite": "Parasite",
"redrockpool": "RedRock Pool",
"est3lar": "Est3lar"
"est3lar": "Est3lar",
"braiinssolo": "Braiins Solo",
"solopool": "SoloPool.com"
});
TERM_NAMES = /** @type {const} */ ({
@@ -8712,6 +8732,8 @@ class BrkClient extends BrkClientBase {
parasite: createBlocksDominancePattern(this, 'parasite'),
redrockpool: createBlocksDominancePattern(this, 'redrockpool'),
est3lar: createBlocksDominancePattern(this, 'est3lar'),
braiinssolo: createBlocksDominancePattern(this, 'braiinssolo'),
solopool: createBlocksDominancePattern(this, 'solopool'),
},
},
prices: {

View File

@@ -49,11 +49,13 @@ BasisPointsSigned16 = int
BasisPointsSigned32 = int
# Bitcoin amount as floating point (1 BTC = 100,000,000 satoshis)
Bitcoin = float
PoolSlug = Literal["unknown", "blockfills", "ultimuspool", "terrapool", "luxor", "onethash", "btccom", "bitfarms", "huobipool", "wayicn", "canoepool", "btctop", "bitcoincom", "pool175btc", "gbminers", "axbt", "asicminer", "bitminter", "bitcoinrussia", "btcserv", "simplecoinus", "btcguild", "eligius", "ozcoin", "eclipsemc", "maxbtc", "triplemining", "coinlab", "pool50btc", "ghashio", "stminingcorp", "bitparking", "mmpool", "polmine", "kncminer", "bitalo", "f2pool", "hhtt", "megabigpower", "mtred", "nmcbit", "yourbtcnet", "givemecoins", "braiinspool", "antpool", "multicoinco", "bcpoolio", "cointerra", "kanopool", "solock", "ckpool", "nicehash", "bitclub", "bitcoinaffiliatenetwork", "btcc", "bwpool", "exxbw", "bitsolo", "bitfury", "twentyoneinc", "digitalbtc", "eightbaochi", "mybtccoinpool", "tbdice", "hashpool", "nexious", "bravomining", "hotpool", "okexpool", "bcmonster", "onehash", "bixin", "tatmaspool", "viabtc", "connectbtc", "batpool", "waterhole", "dcexploration", "dcex", "btpool", "fiftyeightcoin", "bitcoinindia", "shawnp0wers", "phashio", "rigpool", "haozhuzhu", "sevenpool", "miningkings", "hashbx", "dpool", "rawpool", "haominer", "helix", "bitcoinukraine", "poolin", "secretsuperstar", "tigerpoolnet", "sigmapoolcom", "okpooltop", "hummerpool", "tangpool", "bytepool", "spiderpool", "novablock", "miningcity", "binancepool", "minerium", "lubiancom", "okkong", "aaopool", "emcdpool", "foundryusa", "sbicrypto", "arkpool", "purebtccom", "marapool", "kucoinpool", "entrustcharitypool", "okminer", "titan", "pegapool", "btcnuggets", "cloudhashing", "digitalxmintsy", "telco214", "btcpoolparty", "multipool", "transactioncoinmining", "btcdig", "trickysbtcpool", "btcmp", "eobot", "unomp", "patels", "gogreenlight", "bitcoinindiapool", "ekanembtc", "canoe", "tiger", "onem1x", "zulupool", "secpool", "ocean", "whitepool", "wiz", "wk057", "futurebitapollosolo", "carbonnegative", "portlandhodl", "phoenix", "neopool", "maxipool", "bitfufupool", "gdpool", "miningdutch", 
"publicpool", "miningsquared", "innopolistech", "btclab", "parasite", "redrockpool", "est3lar"]
PoolSlug = Literal["unknown", "blockfills", "ultimuspool", "terrapool", "luxor", "onethash", "btccom", "bitfarms", "huobipool", "wayicn", "canoepool", "btctop", "bitcoincom", "pool175btc", "gbminers", "axbt", "asicminer", "bitminter", "bitcoinrussia", "btcserv", "simplecoinus", "btcguild", "eligius", "ozcoin", "eclipsemc", "maxbtc", "triplemining", "coinlab", "pool50btc", "ghashio", "stminingcorp", "bitparking", "mmpool", "polmine", "kncminer", "bitalo", "f2pool", "hhtt", "megabigpower", "mtred", "nmcbit", "yourbtcnet", "givemecoins", "braiinspool", "antpool", "multicoinco", "bcpoolio", "cointerra", "kanopool", "solock", "ckpool", "nicehash", "bitclub", "bitcoinaffiliatenetwork", "btcc", "bwpool", "exxbw", "bitsolo", "bitfury", "twentyoneinc", "digitalbtc", "eightbaochi", "mybtccoinpool", "tbdice", "hashpool", "nexious", "bravomining", "hotpool", "okexpool", "bcmonster", "onehash", "bixin", "tatmaspool", "viabtc", "connectbtc", "batpool", "waterhole", "dcexploration", "dcex", "btpool", "fiftyeightcoin", "bitcoinindia", "shawnp0wers", "phashio", "rigpool", "haozhuzhu", "sevenpool", "miningkings", "hashbx", "dpool", "rawpool", "haominer", "helix", "bitcoinukraine", "poolin", "secretsuperstar", "tigerpoolnet", "sigmapoolcom", "okpooltop", "hummerpool", "tangpool", "bytepool", "spiderpool", "novablock", "miningcity", "binancepool", "minerium", "lubiancom", "okkong", "aaopool", "emcdpool", "foundryusa", "sbicrypto", "arkpool", "purebtccom", "marapool", "kucoinpool", "entrustcharitypool", "okminer", "titan", "pegapool", "btcnuggets", "cloudhashing", "digitalxmintsy", "telco214", "btcpoolparty", "multipool", "transactioncoinmining", "btcdig", "trickysbtcpool", "btcmp", "eobot", "unomp", "patels", "gogreenlight", "bitcoinindiapool", "ekanembtc", "canoe", "tiger", "onem1x", "zulupool", "secpool", "ocean", "whitepool", "wiz", "wk057", "futurebitapollosolo", "carbonnegative", "portlandhodl", "phoenix", "neopool", "maxipool", "bitfufupool", "gdpool", "miningdutch", 
"publicpool", "miningsquared", "innopolistech", "btclab", "parasite", "redrockpool", "est3lar", "braiinssolo", "solopool"]
# Fee rate in sats/vB
FeeRate = float
# Transaction or block weight in weight units (WU)
Weight = int
# US Dollar amount as floating point
Dollars = float
# Block height
Height = int
# UNIX timestamp in seconds
@@ -74,8 +76,6 @@ CentsSigned = int
# Used for precise accumulation of investor cap values: Σ(price² × sats).
# investor_price = investor_cap_raw / realized_cap_raw
CentsSquaredSats = int
# US Dollar amount as floating point
Dollars = float
# Closing price value for a time period
Close = Dollars
# Cohort identifier for cost basis distribution.
@@ -95,6 +95,8 @@ CostBasisBucket = Literal["raw", "lin200", "lin500", "lin1000", "log10", "log50"
# Value type for cost basis distribution.
# Options: supply (BTC), realized (USD, price × supply), unrealized (USD, spot × supply).
CostBasisValue = Literal["supply", "realized", "unrealized"]
# Virtual size in vbytes (weight / 4, rounded up)
VSize = int
# Date in YYYYMMDD format stored as u32
Date = int
# Output format for API responses
@@ -121,8 +123,6 @@ Hour4 = int
SeriesName = str
# Lowest price value for a time period
Low = Dollars
# Virtual size in vbytes (weight / 4, rounded up)
VSize = int
Minute10 = int
Minute30 = int
Month1 = int
@@ -288,6 +288,8 @@ class AddrValidation(TypedDict):
iswitness: Whether this is a witness address
witness_version: Witness version (0 for P2WPKH/P2WSH, 1 for P2TR)
witness_program: Witness program in hex
error_locations: Error locations (empty array for most errors)
error: Error message for invalid addresses
"""
isvalid: bool
address: Optional[str]
@@ -296,6 +298,8 @@ class AddrValidation(TypedDict):
iswitness: Optional[bool]
witness_version: Optional[int]
witness_program: Optional[str]
error_locations: Optional[List[int]]
error: Optional[str]
class BlockCountParam(TypedDict):
"""
@@ -312,10 +316,12 @@ class BlockPool(TypedDict):
id: Unique pool identifier
name: Pool name
slug: URL-friendly pool identifier
minerNames: Alternative miner names (if identified)
"""
id: int
name: str
slug: PoolSlug
minerNames: Optional[str]
class BlockExtras(TypedDict):
"""
@@ -379,10 +385,14 @@ class BlockExtras(TypedDict):
class BlockFeesEntry(TypedDict):
"""
A single block fees data point.
Attributes:
USD: BTC/USD price at that height
"""
avgHeight: Height
timestamp: Timestamp
avgFees: Sats
USD: Dollars
class BlockHashParam(TypedDict):
hash: BlockHash
@@ -412,32 +422,30 @@ class BlockInfo(TypedDict):
Attributes:
id: Block hash
height: Block height
version: Block version, used for soft fork signaling
previousblockhash: Previous block hash
merkle_root: Merkle root of the transaction tree
time: Block timestamp as claimed by the miner (Unix time)
bits: Compact target (bits)
nonce: Nonce used to produce a valid block hash
version: Block version
timestamp: Block timestamp (Unix time)
tx_count: Number of transactions in the block
tx_count: Number of transactions
size: Block size in bytes
weight: Block weight in weight units
merkle_root: Merkle root of the transaction tree
previousblockhash: Previous block hash
mediantime: Median time of the last 11 blocks
nonce: Nonce
bits: Compact target (bits)
difficulty: Block difficulty
"""
id: BlockHash
height: Height
version: int
previousblockhash: BlockHash
merkle_root: str
time: int
bits: int
nonce: int
timestamp: Timestamp
tx_count: int
size: int
weight: Weight
merkle_root: str
previousblockhash: BlockHash
mediantime: Timestamp
nonce: int
bits: int
difficulty: float
class BlockInfoV1(TypedDict):
@@ -447,59 +455,63 @@ class BlockInfoV1(TypedDict):
Attributes:
id: Block hash
height: Block height
version: Block version, used for soft fork signaling
previousblockhash: Previous block hash
merkle_root: Merkle root of the transaction tree
time: Block timestamp as claimed by the miner (Unix time)
bits: Compact target (bits)
nonce: Nonce used to produce a valid block hash
version: Block version
timestamp: Block timestamp (Unix time)
tx_count: Number of transactions in the block
tx_count: Number of transactions
size: Block size in bytes
weight: Block weight in weight units
merkle_root: Merkle root of the transaction tree
previousblockhash: Previous block hash
mediantime: Median time of the last 11 blocks
nonce: Nonce
bits: Compact target (bits)
difficulty: Block difficulty
stale: Whether this block is stale (orphaned)
extras: Extended block data
"""
id: BlockHash
height: Height
version: int
previousblockhash: BlockHash
merkle_root: str
time: int
bits: int
nonce: int
timestamp: Timestamp
tx_count: int
size: int
weight: Weight
merkle_root: str
previousblockhash: BlockHash
mediantime: Timestamp
nonce: int
bits: int
difficulty: float
stale: bool
extras: BlockExtras
class BlockRewardsEntry(TypedDict):
"""
A single block rewards data point.
Attributes:
USD: BTC/USD price at that height
"""
avgHeight: int
timestamp: int
avgRewards: int
avgHeight: Height
timestamp: Timestamp
avgRewards: Sats
USD: Dollars
class BlockSizeEntry(TypedDict):
"""
A single block size data point.
"""
avgHeight: int
timestamp: int
avgHeight: Height
timestamp: Timestamp
avgSize: int
class BlockWeightEntry(TypedDict):
"""
A single block weight data point.
"""
avgHeight: int
timestamp: int
avgWeight: int
avgHeight: Height
timestamp: Timestamp
avgWeight: Weight
class BlockSizesWeights(TypedDict):
"""
@@ -515,7 +527,7 @@ class BlockStatus(TypedDict):
Attributes:
in_best_chain: Whether this block is in the best chain
height: Block height (only if in best chain)
next_best: Hash of the next block in the best chain (only if in best chain and not tip)
next_best: Hash of the next block in the best chain (null if tip)
"""
in_best_chain: bool
height: Union[Height, None]
@@ -561,6 +573,11 @@ class CostBasisQuery(TypedDict):
class CpfpEntry(TypedDict):
"""
A transaction in a CPFP relationship
Attributes:
txid: Transaction ID
weight: Transaction weight
fee: Transaction fee (sats)
"""
txid: Txid
weight: Weight
@@ -569,10 +586,21 @@ class CpfpEntry(TypedDict):
class CpfpInfo(TypedDict):
"""
CPFP (Child Pays For Parent) information for a transaction
Attributes:
ancestors: Ancestor transactions in the CPFP chain
bestDescendant: Best (highest fee rate) descendant, if any
descendants: Descendant transactions in the CPFP chain
effectiveFeePerVsize: Effective fee rate considering CPFP relationships (sat/vB)
fee: Transaction fee (sats)
adjustedVsize: Adjusted virtual size (accounting for sigops)
"""
ancestors: List[CpfpEntry]
bestDescendant: Union[CpfpEntry, None]
descendants: List[CpfpEntry]
effectiveFeePerVsize: FeeRate
fee: Sats
adjustedVsize: VSize
class DataRangeFormat(TypedDict):
"""
@@ -628,14 +656,16 @@ class DifficultyAdjustment(TypedDict):
Attributes:
progressPercent: Progress through current difficulty epoch (0-100%)
difficultyChange: Estimated difficulty change at next retarget (%)
estimatedRetargetDate: Estimated Unix timestamp of next retarget
estimatedRetargetDate: Estimated timestamp of next retarget (milliseconds)
remainingBlocks: Blocks remaining until retarget
remainingTime: Estimated seconds until retarget
remainingTime: Estimated time until retarget (milliseconds)
previousRetarget: Previous difficulty adjustment (%)
previousTime: Timestamp of most recent retarget (seconds)
nextRetargetHeight: Height of next retarget
timeAvg: Average block time in current epoch (seconds)
adjustedTimeAvg: Time-adjusted average (accounting for timestamp manipulation)
timeAvg: Average block time in current epoch (milliseconds)
adjustedTimeAvg: Time-adjusted average (milliseconds)
timeOffset: Time offset from expected schedule (seconds)
expectedBlocks: Expected blocks based on wall clock time since epoch start
"""
progressPercent: float
difficultyChange: float
@@ -643,10 +673,12 @@ class DifficultyAdjustment(TypedDict):
remainingBlocks: int
remainingTime: int
previousRetarget: float
previousTime: Timestamp
nextRetargetHeight: Height
timeAvg: int
adjustedTimeAvg: int
timeOffset: int
expectedBlocks: float
class DifficultyAdjustmentEntry(TypedDict):
"""
@@ -660,16 +692,18 @@ class DifficultyAdjustmentEntry(TypedDict):
class DifficultyEntry(TypedDict):
"""
A single difficulty data point.
A single difficulty data point in the hashrate summary.
Attributes:
timestamp: Unix timestamp of the difficulty adjustment.
difficulty: Difficulty value.
height: Block height of the adjustment.
time: Unix timestamp of the difficulty adjustment
height: Block height of the adjustment
difficulty: Difficulty value
adjustment: Adjustment ratio (new/previous, e.g. 1.068 = +6.8%)
"""
timestamp: Timestamp
difficulty: float
time: Timestamp
height: Height
difficulty: float
adjustment: float
class DiskUsage(TypedDict):
"""
@@ -974,19 +1008,21 @@ class PoolDetailInfo(TypedDict):
Pool information for detail view
Attributes:
id: Unique pool identifier
id: Pool identifier
name: Pool name
link: Pool website URL
addrs: Known payout addresses
addresses: Known payout addresses
regexes: Coinbase tag patterns (regexes)
slug: URL-friendly pool identifier
unique_id: Unique pool identifier
"""
id: int
name: str
link: str
addrs: List[str]
addresses: List[str]
regexes: List[str]
slug: PoolSlug
unique_id: int
class PoolDetail(TypedDict):
"""
@@ -1053,6 +1089,7 @@ class PoolStats(TypedDict):
emptyBlocks: Number of empty blocks mined
slug: URL-friendly pool identifier
share: Pool's share of total blocks (0.0 - 1.0)
poolUniqueId: Unique pool identifier
"""
poolId: int
name: str
@@ -1062,6 +1099,7 @@ class PoolStats(TypedDict):
emptyBlocks: int
slug: PoolSlug
share: float
poolUniqueId: int
class PoolsSummary(TypedDict):
"""
@@ -1071,10 +1109,14 @@ class PoolsSummary(TypedDict):
pools: List of pools sorted by block count descending
blockCount: Total blocks in the time period
lastEstimatedHashrate: Estimated network hashrate (hashes per second)
lastEstimatedHashrate3d: Estimated network hashrate over last 3 days
lastEstimatedHashrate1w: Estimated network hashrate over last 1 week
"""
pools: List[PoolStats]
blockCount: int
lastEstimatedHashrate: int
lastEstimatedHashrate3d: int
lastEstimatedHashrate1w: int
class Prices(TypedDict):
"""
@@ -1235,21 +1277,26 @@ class TxIn(TypedDict):
Attributes:
txid: Transaction ID of the output being spent
vout: Output index being spent
prevout: Information about the previous output being spent
scriptsig: Signature script (for non-SegWit inputs)
scriptsig: Signature script (hex, for non-SegWit inputs)
scriptsig_asm: Signature script in assembly format
witness: Witness data (hex-encoded stack items, present for SegWit inputs)
is_coinbase: Whether this input is a coinbase (block reward) input
sequence: Input sequence number
inner_redeemscript_asm: Inner redeemscript in assembly format (for P2SH-wrapped SegWit)
inner_redeemscript_asm: Inner redeemscript in assembly (for P2SH-wrapped SegWit: scriptsig + witness both present)
inner_witnessscript_asm: Inner witnessscript in assembly (for P2WSH: last witness item decoded as script)
"""
txid: Txid
vout: Vout
prevout: Union[TxOut, None]
scriptsig: str
scriptsig_asm: str
witness: List[str]
is_coinbase: bool
sequence: int
inner_redeemscript_asm: Optional[str]
inner_redeemscript_asm: str
inner_witnessscript_asm: str
class TxStatus(TypedDict):
"""
@@ -1271,23 +1318,23 @@ class Transaction(TypedDict):
Transaction information compatible with mempool.space API format
Attributes:
vin: Transaction inputs
vout: Transaction outputs
size: Transaction size in bytes
weight: Transaction weight
sigops: Number of signature operations
fee: Transaction fee in satoshis
vin: Transaction inputs
vout: Transaction outputs
"""
index: Union[TxIndex, None]
txid: Txid
version: TxVersion
locktime: RawLockTime
vin: List[TxIn]
vout: List[TxOut]
size: int
weight: Weight
sigops: int
fee: Sats
vin: List[TxIn]
vout: List[TxOut]
status: TxStatus
class TxOutspend(TypedDict):
@@ -4935,6 +4982,8 @@ class SeriesTree_Pools_Minor:
self.parasite: BlocksDominancePattern = BlocksDominancePattern(client, 'parasite')
self.redrockpool: BlocksDominancePattern = BlocksDominancePattern(client, 'redrockpool')
self.est3lar: BlocksDominancePattern = BlocksDominancePattern(client, 'est3lar')
self.braiinssolo: BlocksDominancePattern = BlocksDominancePattern(client, 'braiinssolo')
self.solopool: BlocksDominancePattern = BlocksDominancePattern(client, 'solopool')
class SeriesTree_Pools:
"""Series tree node."""
@@ -5952,6 +6001,7 @@ class BrkClient(BrkClientBase):
"bixin": "Bixin",
"blockfills": "BlockFills",
"braiinspool": "Braiins Pool",
"braiinssolo": "Braiins Solo",
"bravomining": "Bravo Mining",
"btcc": "BTCC",
"btccom": "BTC.com",
@@ -6063,6 +6113,7 @@ class BrkClient(BrkClientBase):
"sigmapoolcom": "Sigmapool.com",
"simplecoinus": "simplecoin.us",
"solock": "Solo CK",
"solopool": "SoloPool.com",
"spiderpool": "SpiderPool",
"stminingcorp": "ST Mining Corp",
"tangpool": "Tangpool",