global: snapshot

This commit is contained in:
nym21
2026-03-31 22:53:25 +02:00
parent d038141a8a
commit ae26db6df2
83 changed files with 3398 additions and 710 deletions

20
Cargo.lock generated
View File

@@ -444,7 +444,6 @@ dependencies = [
"brk_computer", "brk_computer",
"brk_error", "brk_error",
"brk_indexer", "brk_indexer",
"brk_iterator",
"brk_logger", "brk_logger",
"brk_mempool", "brk_mempool",
"brk_query", "brk_query",
@@ -494,12 +493,10 @@ dependencies = [
"brk_cohort", "brk_cohort",
"brk_error", "brk_error",
"brk_indexer", "brk_indexer",
"brk_iterator",
"brk_logger", "brk_logger",
"brk_oracle", "brk_oracle",
"brk_reader", "brk_reader",
"brk_rpc", "brk_rpc",
"brk_store",
"brk_traversable", "brk_traversable",
"brk_types", "brk_types",
"color-eyre", "color-eyre",
@@ -553,7 +550,6 @@ dependencies = [
"brk_bencher", "brk_bencher",
"brk_cohort", "brk_cohort",
"brk_error", "brk_error",
"brk_iterator",
"brk_logger", "brk_logger",
"brk_reader", "brk_reader",
"brk_rpc", "brk_rpc",
@@ -2545,9 +2541,7 @@ dependencies = [
[[package]] [[package]]
name = "rawdb" name = "rawdb"
version = "0.9.0" version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fddb06a11fcc5f7f44d9b5bee4ab61b5a1135232b2fd239253428abd192ba504"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",
@@ -3439,9 +3433,7 @@ checksum = "8f54a172d0620933a27a4360d3db3e2ae0dd6cceae9730751a036bbf182c4b23"
[[package]] [[package]]
name = "vecdb" name = "vecdb"
version = "0.9.0" version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33f1cbef9bf38048ee1b51328366f0a734e06bcc0b9739d68fef9ecce43d0b8"
dependencies = [ dependencies = [
"itoa", "itoa",
"libc", "libc",
@@ -3462,9 +3454,7 @@ dependencies = [
[[package]] [[package]]
name = "vecdb_derive" name = "vecdb_derive"
version = "0.9.0" version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d31f03d1c7269d65195fb4d54c1d510b124807871bd11af7d10a08700d7590"
dependencies = [ dependencies = [
"quote", "quote",
"syn", "syn",
@@ -3788,9 +3778,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]] [[package]]
name = "winnow" name = "winnow"
version = "1.0.0" version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
[[package]] [[package]]
name = "wio" name = "wio"

View File

@@ -87,8 +87,8 @@ tower-http = { version = "0.6.8", features = ["catch-panic", "compression-br", "
tower-layer = "0.3" tower-layer = "0.3"
tracing = { version = "0.1", default-features = false, features = ["std"] } tracing = { version = "0.1", default-features = false, features = ["std"] }
ureq = { version = "3.3.0", features = ["json"] } ureq = { version = "3.3.0", features = ["json"] }
vecdb = { version = "0.9.0", features = ["derive", "serde_json", "pco", "schemars"] } # vecdb = { version = "0.9.1", features = ["derive", "serde_json", "pco", "schemars"] }
# vecdb = { path = "../anydb/crates/vecdb", features = ["derive", "serde_json", "pco", "schemars"] } vecdb = { path = "../anydb/crates/vecdb", features = ["derive", "serde_json", "pco", "schemars"] }
[workspace.metadata.release] [workspace.metadata.release]
shared-version = true shared-version = true

View File

@@ -82,18 +82,19 @@ pub fn generate_api_methods(output: &mut String, endpoints: &[Endpoint]) {
} else { } else {
writeln!(output, " const params = new URLSearchParams();").unwrap(); writeln!(output, " const params = new URLSearchParams();").unwrap();
for param in &endpoint.query_params { for param in &endpoint.query_params {
let ident = sanitize_ident(&param.name);
if param.required { if param.required {
writeln!( writeln!(
output, output,
" params.set('{}', String({}));", " params.set('{}', String({}));",
param.name, param.name param.name, ident
) )
.unwrap(); .unwrap();
} else { } else {
writeln!( writeln!(
output, output,
" if ({} !== undefined) params.set('{}', String({}));", " if ({} !== undefined) params.set('{}', String({}));",
param.name, param.name, param.name ident, param.name, ident
) )
.unwrap(); .unwrap();
} }
@@ -127,14 +128,19 @@ fn endpoint_to_method_name(endpoint: &Endpoint) -> String {
fn build_method_params(endpoint: &Endpoint) -> String { fn build_method_params(endpoint: &Endpoint) -> String {
let mut params = Vec::new(); let mut params = Vec::new();
for param in &endpoint.path_params { for param in &endpoint.path_params {
params.push(param.name.clone()); params.push(sanitize_ident(&param.name));
} }
for param in &endpoint.query_params { for param in &endpoint.query_params {
params.push(param.name.clone()); params.push(sanitize_ident(&param.name));
} }
params.join(", ") params.join(", ")
} }
/// Strip characters invalid in JS identifiers (e.g. `[]` from `txId[]`).
fn sanitize_ident(name: &str) -> String {
name.replace(['[', ']'], "")
}
fn build_path_template(path: &str, path_params: &[Parameter]) -> String { fn build_path_template(path: &str, path_params: &[Parameter]) -> String {
let mut result = path.to_string(); let mut result = path.to_string();
for param in path_params { for param in path_params {

View File

@@ -143,18 +143,19 @@ pub fn generate_api_methods(output: &mut String, endpoints: &[Endpoint]) {
} else { } else {
writeln!(output, " let mut query = Vec::new();").unwrap(); writeln!(output, " let mut query = Vec::new();").unwrap();
for param in &endpoint.query_params { for param in &endpoint.query_params {
let ident = sanitize_ident(&param.name);
if param.required { if param.required {
writeln!( writeln!(
output, output,
" query.push(format!(\"{}={{}}\", {}));", " query.push(format!(\"{}={{}}\", {}));",
param.name, param.name param.name, ident
) )
.unwrap(); .unwrap();
} else { } else {
writeln!( writeln!(
output, output,
" if let Some(v) = {} {{ query.push(format!(\"{}={{}}\", v)); }}", " if let Some(v) = {} {{ query.push(format!(\"{}={{}}\", v)); }}",
param.name, param.name ident, param.name
) )
.unwrap(); .unwrap();
} }
@@ -198,26 +199,35 @@ fn build_method_params(endpoint: &Endpoint) -> String {
let mut params = Vec::new(); let mut params = Vec::new();
for param in &endpoint.path_params { for param in &endpoint.path_params {
let rust_type = param_type_to_rust(&param.param_type); let rust_type = param_type_to_rust(&param.param_type);
params.push(format!(", {}: {}", param.name, rust_type)); params.push(format!(", {}: {}", sanitize_ident(&param.name), rust_type));
} }
for param in &endpoint.query_params { for param in &endpoint.query_params {
let rust_type = param_type_to_rust(&param.param_type); let rust_type = param_type_to_rust(&param.param_type);
let name = sanitize_ident(&param.name);
if param.required { if param.required {
params.push(format!(", {}: {}", param.name, rust_type)); params.push(format!(", {}: {}", name, rust_type));
} else { } else {
params.push(format!(", {}: Option<{}>", param.name, rust_type)); params.push(format!(", {}: Option<{}>", name, rust_type));
} }
} }
params.join("") params.join("")
} }
/// Strip characters invalid in Rust identifiers (e.g. `[]` from `txId[]`).
fn sanitize_ident(name: &str) -> String {
name.replace(['[', ']'], "")
}
/// Convert parameter type to Rust type for function signatures. /// Convert parameter type to Rust type for function signatures.
fn param_type_to_rust(param_type: &str) -> String { fn param_type_to_rust(param_type: &str) -> String {
if let Some(inner) = param_type.strip_suffix("[]") {
return format!("&[{}]", param_type_to_rust(inner));
}
match param_type { match param_type {
"string" | "*" => "&str".to_string(), "string" | "*" => "&str".to_string(),
"integer" | "number" => "i64".to_string(), "integer" | "number" => "i64".to_string(),
"boolean" => "bool".to_string(), "boolean" => "bool".to_string(),
other => other.to_string(), // Domain types like Index, SeriesName, Format other => other.to_string(),
} }
} }

View File

@@ -74,6 +74,9 @@ pub fn escape_python_keyword(name: &str) -> String {
"try", "while", "with", "yield", "try", "while", "with", "yield",
]; ];
// Strip characters invalid in identifiers (e.g. `[]` from `txId[]`)
let name = name.replace(['[', ']'], "");
// Prefix with underscore if starts with digit // Prefix with underscore if starts with digit
let name = if name.starts_with(|c: char| c.is_ascii_digit()) { let name = if name.starts_with(|c: char| c.is_ascii_digit()) {
format!("_{}", name) format!("_{}", name)

View File

@@ -13,7 +13,6 @@ brk_alloc = { workspace = true }
brk_computer = { workspace = true } brk_computer = { workspace = true }
brk_error = { workspace = true, features = ["tokio", "vecdb"] } brk_error = { workspace = true, features = ["tokio", "vecdb"] }
brk_indexer = { workspace = true } brk_indexer = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true } brk_logger = { workspace = true }
brk_mempool = { workspace = true } brk_mempool = { workspace = true }
brk_query = { workspace = true } brk_query = { workspace = true }

View File

@@ -10,7 +10,6 @@ use brk_alloc::Mimalloc;
use brk_computer::Computer; use brk_computer::Computer;
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_mempool::Mempool; use brk_mempool::Mempool;
use brk_query::AsyncQuery; use brk_query::AsyncQuery;
use brk_reader::Reader; use brk_reader::Reader;
@@ -37,8 +36,6 @@ pub fn main() -> anyhow::Result<()> {
let reader = Reader::new(config.blocksdir(), &client); let reader = Reader::new(config.blocksdir(), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&config.brkdir())?; let mut indexer = Indexer::forced_import(&config.brkdir())?;
#[cfg(not(debug_assertions))] #[cfg(not(debug_assertions))]
@@ -52,7 +49,7 @@ pub fn main() -> anyhow::Result<()> {
info!("Indexing {blocks_behind} blocks before starting server..."); info!("Indexing {blocks_behind} blocks before starting server...");
info!("---"); info!("---");
sleep(Duration::from_secs(10)); sleep(Duration::from_secs(10));
indexer.index(&blocks, &client, &exit)?; indexer.index(&reader, &client, &exit)?;
drop(indexer); drop(indexer);
Mimalloc::collect(); Mimalloc::collect();
indexer = Indexer::forced_import(&config.brkdir())?; indexer = Indexer::forced_import(&config.brkdir())?;
@@ -102,14 +99,14 @@ pub fn main() -> anyhow::Result<()> {
let total_start = Instant::now(); let total_start = Instant::now();
let starting_indexes = if cfg!(debug_assertions) { let starting_indexes = if cfg!(debug_assertions) {
indexer.checked_index(&blocks, &client, &exit)? indexer.checked_index(&reader, &client, &exit)?
} else { } else {
indexer.index(&blocks, &client, &exit)? indexer.index(&reader, &client, &exit)?
}; };
Mimalloc::collect(); Mimalloc::collect();
computer.compute(&indexer, starting_indexes, &reader, &exit)?; computer.compute(&indexer, starting_indexes, &exit)?;
info!("Total time: {:?}", total_start.elapsed()); info!("Total time: {:?}", total_start.elapsed());
info!("Waiting for new blocks..."); info!("Waiting for new blocks...");

View File

@@ -13,6 +13,7 @@ use serde::de::DeserializeOwned;
pub use brk_cohort::*; pub use brk_cohort::*;
pub use brk_types::*; pub use brk_types::*;
/// Error type for BRK client operations. /// Error type for BRK client operations.
#[derive(Debug)] #[derive(Debug)]
pub struct BrkError { pub struct BrkError {
@@ -3124,7 +3125,6 @@ pub struct SeriesTree {
pub addrs: SeriesTree_Addrs, pub addrs: SeriesTree_Addrs,
pub scripts: SeriesTree_Scripts, pub scripts: SeriesTree_Scripts,
pub mining: SeriesTree_Mining, pub mining: SeriesTree_Mining,
pub positions: SeriesTree_Positions,
pub cointime: SeriesTree_Cointime, pub cointime: SeriesTree_Cointime,
pub constants: SeriesTree_Constants, pub constants: SeriesTree_Constants,
pub indexes: SeriesTree_Indexes, pub indexes: SeriesTree_Indexes,
@@ -3147,7 +3147,6 @@ impl SeriesTree {
addrs: SeriesTree_Addrs::new(client.clone(), format!("{base_path}_addrs")), addrs: SeriesTree_Addrs::new(client.clone(), format!("{base_path}_addrs")),
scripts: SeriesTree_Scripts::new(client.clone(), format!("{base_path}_scripts")), scripts: SeriesTree_Scripts::new(client.clone(), format!("{base_path}_scripts")),
mining: SeriesTree_Mining::new(client.clone(), format!("{base_path}_mining")), mining: SeriesTree_Mining::new(client.clone(), format!("{base_path}_mining")),
positions: SeriesTree_Positions::new(client.clone(), format!("{base_path}_positions")),
cointime: SeriesTree_Cointime::new(client.clone(), format!("{base_path}_cointime")), cointime: SeriesTree_Cointime::new(client.clone(), format!("{base_path}_cointime")),
constants: SeriesTree_Constants::new(client.clone(), format!("{base_path}_constants")), constants: SeriesTree_Constants::new(client.clone(), format!("{base_path}_constants")),
indexes: SeriesTree_Indexes::new(client.clone(), format!("{base_path}_indexes")), indexes: SeriesTree_Indexes::new(client.clone(), format!("{base_path}_indexes")),
@@ -3165,10 +3164,14 @@ impl SeriesTree {
/// Series tree node. /// Series tree node.
pub struct SeriesTree_Blocks { pub struct SeriesTree_Blocks {
pub blockhash: SeriesPattern18<BlockHash>, pub blockhash: SeriesPattern18<BlockHash>,
pub coinbase_tag: SeriesPattern18<CoinbaseTag>,
pub difficulty: SeriesTree_Blocks_Difficulty, pub difficulty: SeriesTree_Blocks_Difficulty,
pub time: SeriesTree_Blocks_Time, pub time: SeriesTree_Blocks_Time,
pub size: SeriesTree_Blocks_Size, pub size: SeriesTree_Blocks_Size,
pub weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern<Weight>, pub weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern<Weight>,
pub segwit_txs: SeriesPattern18<StoredU32>,
pub segwit_size: SeriesPattern18<StoredU64>,
pub segwit_weight: SeriesPattern18<Weight>,
pub count: SeriesTree_Blocks_Count, pub count: SeriesTree_Blocks_Count,
pub lookback: SeriesTree_Blocks_Lookback, pub lookback: SeriesTree_Blocks_Lookback,
pub interval: SeriesTree_Blocks_Interval, pub interval: SeriesTree_Blocks_Interval,
@@ -3181,10 +3184,14 @@ impl SeriesTree_Blocks {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self { pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self { Self {
blockhash: SeriesPattern18::new(client.clone(), "blockhash".to_string()), blockhash: SeriesPattern18::new(client.clone(), "blockhash".to_string()),
coinbase_tag: SeriesPattern18::new(client.clone(), "coinbase_tag".to_string()),
difficulty: SeriesTree_Blocks_Difficulty::new(client.clone(), format!("{base_path}_difficulty")), difficulty: SeriesTree_Blocks_Difficulty::new(client.clone(), format!("{base_path}_difficulty")),
time: SeriesTree_Blocks_Time::new(client.clone(), format!("{base_path}_time")), time: SeriesTree_Blocks_Time::new(client.clone(), format!("{base_path}_time")),
size: SeriesTree_Blocks_Size::new(client.clone(), format!("{base_path}_size")), size: SeriesTree_Blocks_Size::new(client.clone(), format!("{base_path}_size")),
weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern::new(client.clone(), "block_weight".to_string()), weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern::new(client.clone(), "block_weight".to_string()),
segwit_txs: SeriesPattern18::new(client.clone(), "segwit_txs".to_string()),
segwit_size: SeriesPattern18::new(client.clone(), "segwit_size".to_string()),
segwit_weight: SeriesPattern18::new(client.clone(), "segwit_weight".to_string()),
count: SeriesTree_Blocks_Count::new(client.clone(), format!("{base_path}_count")), count: SeriesTree_Blocks_Count::new(client.clone(), format!("{base_path}_count")),
lookback: SeriesTree_Blocks_Lookback::new(client.clone(), format!("{base_path}_lookback")), lookback: SeriesTree_Blocks_Lookback::new(client.clone(), format!("{base_path}_lookback")),
interval: SeriesTree_Blocks_Interval::new(client.clone(), format!("{base_path}_interval")), interval: SeriesTree_Blocks_Interval::new(client.clone(), format!("{base_path}_interval")),
@@ -3538,6 +3545,7 @@ pub struct SeriesTree_Transactions_Fees {
pub output_value: SeriesPattern19<Sats>, pub output_value: SeriesPattern19<Sats>,
pub fee: _6bBlockTxPattern<Sats>, pub fee: _6bBlockTxPattern<Sats>,
pub fee_rate: _6bBlockTxPattern<FeeRate>, pub fee_rate: _6bBlockTxPattern<FeeRate>,
pub effective_fee_rate: _6bBlockTxPattern<FeeRate>,
} }
impl SeriesTree_Transactions_Fees { impl SeriesTree_Transactions_Fees {
@@ -3547,6 +3555,7 @@ impl SeriesTree_Transactions_Fees {
output_value: SeriesPattern19::new(client.clone(), "output_value".to_string()), output_value: SeriesPattern19::new(client.clone(), "output_value".to_string()),
fee: _6bBlockTxPattern::new(client.clone(), "fee".to_string()), fee: _6bBlockTxPattern::new(client.clone(), "fee".to_string()),
fee_rate: _6bBlockTxPattern::new(client.clone(), "fee_rate".to_string()), fee_rate: _6bBlockTxPattern::new(client.clone(), "fee_rate".to_string()),
effective_fee_rate: _6bBlockTxPattern::new(client.clone(), "effective_fee_rate".to_string()),
} }
} }
} }
@@ -4179,6 +4188,7 @@ pub struct SeriesTree_Mining_Rewards {
pub coinbase: AverageBlockCumulativeSumPattern3, pub coinbase: AverageBlockCumulativeSumPattern3,
pub subsidy: SeriesTree_Mining_Rewards_Subsidy, pub subsidy: SeriesTree_Mining_Rewards_Subsidy,
pub fees: SeriesTree_Mining_Rewards_Fees, pub fees: SeriesTree_Mining_Rewards_Fees,
pub output_volume: SeriesPattern18<Sats>,
pub unclaimed: BlockCumulativePattern, pub unclaimed: BlockCumulativePattern,
} }
@@ -4188,6 +4198,7 @@ impl SeriesTree_Mining_Rewards {
coinbase: AverageBlockCumulativeSumPattern3::new(client.clone(), "coinbase".to_string()), coinbase: AverageBlockCumulativeSumPattern3::new(client.clone(), "coinbase".to_string()),
subsidy: SeriesTree_Mining_Rewards_Subsidy::new(client.clone(), format!("{base_path}_subsidy")), subsidy: SeriesTree_Mining_Rewards_Subsidy::new(client.clone(), format!("{base_path}_subsidy")),
fees: SeriesTree_Mining_Rewards_Fees::new(client.clone(), format!("{base_path}_fees")), fees: SeriesTree_Mining_Rewards_Fees::new(client.clone(), format!("{base_path}_fees")),
output_volume: SeriesPattern18::new(client.clone(), "output_volume".to_string()),
unclaimed: BlockCumulativePattern::new(client.clone(), "unclaimed_rewards".to_string()), unclaimed: BlockCumulativePattern::new(client.clone(), "unclaimed_rewards".to_string()),
} }
} }
@@ -4325,17 +4336,6 @@ impl SeriesTree_Mining_Hashrate_Rate_Sma {
} }
} }
/// Series tree node.
pub struct SeriesTree_Positions {
}
impl SeriesTree_Positions {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
}
}
}
/// Series tree node. /// Series tree node.
pub struct SeriesTree_Cointime { pub struct SeriesTree_Cointime {
pub activity: SeriesTree_Cointime_Activity, pub activity: SeriesTree_Cointime_Activity,
@@ -8320,14 +8320,14 @@ impl BrkClient {
self.base.get_json(&format!("/api/address/{address}/utxo")) self.base.get_json(&format!("/api/address/{address}/utxo"))
} }
/// Block by height /// Block hash by height
/// ///
/// Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count. /// Retrieve the block hash at a given height. Returns the hash as plain text.
/// ///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)* /// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*
/// ///
/// Endpoint: `GET /api/block-height/{height}` /// Endpoint: `GET /api/block-height/{height}`
pub fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> { pub fn get_block_by_height(&self, height: Height) -> Result<BlockHash> {
self.base.get_json(&format!("/api/block-height/{height}")) self.base.get_json(&format!("/api/block-height/{height}"))
} }
@@ -8342,6 +8342,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/block/{hash}")) self.base.get_json(&format!("/api/block/{hash}"))
} }
/// Block header
///
/// Returns the hex-encoded block header.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*
///
/// Endpoint: `GET /api/block/{hash}/header`
pub fn get_block_header(&self, hash: BlockHash) -> Result<Hex> {
self.base.get_json(&format!("/api/block/{hash}/header"))
}
/// Raw block /// Raw block
/// ///
/// Returns the raw block data in binary format. /// Returns the raw block data in binary format.
@@ -8408,6 +8419,28 @@ impl BrkClient {
self.base.get_json(&format!("/api/blocks")) self.base.get_json(&format!("/api/blocks"))
} }
/// Block tip hash
///
/// Returns the hash of the last block.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*
///
/// Endpoint: `GET /api/blocks/tip/hash`
pub fn get_block_tip_hash(&self) -> Result<BlockHash> {
self.base.get_json(&format!("/api/blocks/tip/hash"))
}
/// Block tip height
///
/// Returns the height of the last block.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*
///
/// Endpoint: `GET /api/blocks/tip/height`
pub fn get_block_tip_height(&self) -> Result<Height> {
self.base.get_json(&format!("/api/blocks/tip/height"))
}
/// Blocks from height /// Blocks from height
/// ///
/// Retrieve up to 10 blocks going backwards from the given height. For example, height=100 returns blocks 100, 99, 98, ..., 91. Height=0 returns only block 0. /// Retrieve up to 10 blocks going backwards from the given height. For example, height=100 returns blocks 100, 99, 98, ..., 91. Height=0 returns only block 0.
@@ -8425,9 +8458,9 @@ impl BrkClient {
/// ///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)* /// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)*
/// ///
/// Endpoint: `GET /api/mempool/info` /// Endpoint: `GET /api/mempool`
pub fn get_mempool(&self) -> Result<MempoolInfo> { pub fn get_mempool(&self) -> Result<MempoolInfo> {
self.base.get_json(&format!("/api/mempool/info")) self.base.get_json(&format!("/api/mempool"))
} }
/// Live BTC/USD price /// Live BTC/USD price
@@ -8439,6 +8472,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/mempool/price")) self.base.get_json(&format!("/api/mempool/price"))
} }
/// Recent mempool transactions
///
/// Get the last 10 transactions to enter the mempool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*
///
/// Endpoint: `GET /api/mempool/recent`
pub fn get_mempool_recent(&self) -> Result<Vec<MempoolRecentTx>> {
self.base.get_json(&format!("/api/mempool/recent"))
}
/// Mempool transaction IDs /// Mempool transaction IDs
/// ///
/// Get all transaction IDs currently in the mempool. /// Get all transaction IDs currently in the mempool.
@@ -8679,6 +8723,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/hex")) self.base.get_json(&format!("/api/tx/{txid}/hex"))
} }
/// Transaction merkle proof
///
/// Get the merkle inclusion proof for a transaction.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*
///
/// Endpoint: `GET /api/tx/{txid}/merkle-proof`
pub fn get_tx_merkle_proof(&self, txid: Txid) -> Result<MerkleProof> {
self.base.get_json(&format!("/api/tx/{txid}/merkle-proof"))
}
/// Output spend status /// Output spend status
/// ///
/// Get the spending status of a transaction output. Returns whether the output has been spent and, if so, the spending transaction details. /// Get the spending status of a transaction output. Returns whether the output has been spent and, if so, the spending transaction details.
@@ -8701,6 +8756,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/outspends")) self.base.get_json(&format!("/api/tx/{txid}/outspends"))
} }
/// Transaction raw
///
/// Returns a transaction as binary data.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*
///
/// Endpoint: `GET /api/tx/{txid}/raw`
pub fn get_tx_raw(&self, txid: Txid) -> Result<Vec<f64>> {
self.base.get_json(&format!("/api/tx/{txid}/raw"))
}
/// Transaction status /// Transaction status
/// ///
/// Retrieve the confirmation status of a transaction. Returns whether the transaction is confirmed and, if so, the block height, hash, and timestamp. /// Retrieve the confirmation status of a transaction. Returns whether the transaction is confirmed and, if so, the block height, hash, and timestamp.
@@ -8712,6 +8778,50 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/status")) self.base.get_json(&format!("/api/tx/{txid}/status"))
} }
/// Block (v1)
///
/// Returns block details with extras by hash.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*
///
/// Endpoint: `GET /api/v1/block/{hash}`
pub fn get_block_v1(&self, hash: BlockHash) -> Result<BlockInfoV1> {
self.base.get_json(&format!("/api/v1/block/{hash}"))
}
/// Recent blocks with extras
///
/// Retrieve the last 10 blocks with extended data including pool identification and fee statistics.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
///
/// Endpoint: `GET /api/v1/blocks`
pub fn get_blocks_v1(&self) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/blocks"))
}
/// Blocks from height with extras
///
/// Retrieve up to 10 blocks with extended data going backwards from the given height.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
///
/// Endpoint: `GET /api/v1/blocks/{height}`
pub fn get_blocks_v1_from_height(&self, height: Height) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/blocks/{height}"))
}
/// CPFP info
///
/// Returns ancestors and descendants for a CPFP transaction.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*
///
/// Endpoint: `GET /api/v1/cpfp/{txid}`
pub fn get_cpfp(&self, txid: Txid) -> Result<CpfpInfo> {
self.base.get_json(&format!("/api/v1/cpfp/{txid}"))
}
/// Difficulty adjustment /// Difficulty adjustment
/// ///
/// Get current difficulty adjustment information including progress through the current epoch, estimated retarget date, and difficulty change prediction. /// Get current difficulty adjustment information including progress through the current epoch, estimated retarget date, and difficulty change prediction.
@@ -8734,6 +8844,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/fees/mempool-blocks")) self.base.get_json(&format!("/api/v1/fees/mempool-blocks"))
} }
/// Precise recommended fees
///
/// Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*
///
/// Endpoint: `GET /api/v1/fees/precise`
pub fn get_precise_fees(&self) -> Result<RecommendedFees> {
self.base.get_json(&format!("/api/v1/fees/precise"))
}
/// Recommended fees /// Recommended fees
/// ///
/// Get recommended fee rates for different confirmation targets based on current mempool state. /// Get recommended fee rates for different confirmation targets based on current mempool state.
@@ -8745,6 +8866,21 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/fees/recommended")) self.base.get_json(&format!("/api/v1/fees/recommended"))
} }
/// Historical price
///
/// Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*
///
/// Endpoint: `GET /api/v1/historical-price`
pub fn get_historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
let mut query = Vec::new();
if let Some(v) = timestamp { query.push(format!("timestamp={}", v)); }
let query_str = if query.is_empty() { String::new() } else { format!("?{}", query.join("&")) };
let path = format!("/api/v1/historical-price{}", query_str);
self.base.get_json(&path)
}
/// Block fee rates (WIP) /// Block fee rates (WIP)
/// ///
/// **Work in progress.** Get block fee rate percentiles (min, 10th, 25th, median, 75th, 90th, max) for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y /// **Work in progress.** Get block fee rate percentiles (min, 10th, 25th, median, 75th, 90th, max) for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y
@@ -8833,6 +8969,28 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/hashrate")) self.base.get_json(&format!("/api/v1/mining/hashrate"))
} }
/// All pools hashrate (all time)
///
/// Get hashrate data for all mining pools.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
///
/// Endpoint: `GET /api/v1/mining/hashrate/pools`
pub fn get_pools_hashrate(&self) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/hashrate/pools"))
}
/// All pools hashrate
///
/// Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
///
/// Endpoint: `GET /api/v1/mining/hashrate/pools/{time_period}`
pub fn get_pools_hashrate_by_period(&self, time_period: TimePeriod) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/hashrate/pools/{time_period}"))
}
/// Network hashrate /// Network hashrate
/// ///
/// Get network hashrate and difficulty data for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y /// Get network hashrate and difficulty data for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y
@@ -8855,6 +9013,39 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}")) self.base.get_json(&format!("/api/v1/mining/pool/{slug}"))
} }
/// Mining pool blocks
///
/// Get the 10 most recent blocks mined by a specific pool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/blocks`
pub fn get_pool_blocks(&self, slug: PoolSlug) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/blocks"))
}
/// Mining pool blocks from height
///
/// Get 10 blocks mined by a specific pool before (and including) the given height.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/blocks/{height}`
pub fn get_pool_blocks_from(&self, slug: PoolSlug, height: Height) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/blocks/{height}"))
}
/// Mining pool hashrate
///
/// Get hashrate history for a specific mining pool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/hashrate`
pub fn get_pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/hashrate"))
}
/// List all mining pools /// List all mining pools
/// ///
/// Get list of all known mining pools with their identifiers. /// Get list of all known mining pools with their identifiers.
@@ -8888,6 +9079,21 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/reward-stats/{block_count}")) self.base.get_json(&format!("/api/v1/mining/reward-stats/{block_count}"))
} }
/// Transaction first-seen times
///
/// Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*
///
/// Endpoint: `GET /api/v1/transaction-times`
#[allow(non_snake_case)] // parameter name mirrors the API's `txId[]` query key
pub fn get_transaction_times(&self, txId: &[Txid]) -> Result<Vec<f64>> {
    // `Txid[]` is not a Rust type: the array-typed parameter must be a slice
    // (`&[Txid]`, matching the generator's `param_type_to_rust` handling of `[]`),
    // and the endpoint expects one repeated `txId[]=<id>` pair per transaction id
    // rather than the whole slice formatted into a single value.
    let query: Vec<String> = txId.iter().map(|id| format!("txId[]={}", id)).collect();
    let query_str = if query.is_empty() { String::new() } else { format!("?{}", query.join("&")) };
    let path = format!("/api/v1/transaction-times{}", query_str);
    self.base.get_json(&path)
}
/// Validate address /// Validate address
/// ///
/// Validate a Bitcoin address and get information about its type and scriptPubKey. /// Validate a Bitcoin address and get information about its type and scriptPubKey.

View File

@@ -14,3 +14,6 @@ brk_traversable = { workspace = true }
vecdb = { workspace = true } vecdb = { workspace = true }
rayon = { workspace = true } rayon = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
[package.metadata.cargo-machete]
ignored = ["vecdb"]

View File

@@ -14,11 +14,8 @@ brk_error = { workspace = true, features = ["vecdb"] }
brk_cohort = { workspace = true } brk_cohort = { workspace = true }
brk_indexer = { workspace = true } brk_indexer = { workspace = true }
brk_oracle = { workspace = true } brk_oracle = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true } brk_logger = { workspace = true }
brk_reader = { workspace = true }
brk_rpc = { workspace = true, features = ["corepc"] } brk_rpc = { workspace = true, features = ["corepc"] }
brk_store = { workspace = true }
brk_traversable = { workspace = true } brk_traversable = { workspace = true }
brk_types = { workspace = true } brk_types = { workspace = true }
derive_more = { workspace = true } derive_more = { workspace = true }
@@ -33,6 +30,7 @@ smallvec = { workspace = true }
vecdb = { workspace = true } vecdb = { workspace = true }
[dev-dependencies] [dev-dependencies]
brk_reader = { workspace = true }
brk_alloc = { workspace = true } brk_alloc = { workspace = true }
brk_bencher = { workspace = true } brk_bencher = { workspace = true }
color-eyre = { workspace = true } color-eyre = { workspace = true }

View File

@@ -8,7 +8,6 @@ use std::{
use brk_alloc::Mimalloc; use brk_alloc::Mimalloc;
use brk_computer::Computer; use brk_computer::Computer;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use vecdb::Exit; use vecdb::Exit;
@@ -31,8 +30,6 @@ pub fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
let exit = Exit::new(); let exit = Exit::new();
@@ -42,7 +39,7 @@ pub fn main() -> color_eyre::Result<()> {
let chain_height = client.get_last_height()?; let chain_height = client.get_last_height()?;
let indexed_height = indexer.vecs.starting_height(); let indexed_height = indexer.vecs.starting_height();
if u32::from(chain_height).saturating_sub(u32::from(indexed_height)) > 1000 { if u32::from(chain_height).saturating_sub(u32::from(indexed_height)) > 1000 {
indexer.checked_index(&blocks, &client, &exit)?; indexer.checked_index(&reader, &client, &exit)?;
drop(indexer); drop(indexer);
Mimalloc::collect(); Mimalloc::collect();
indexer = Indexer::forced_import(&outputs_dir)?; indexer = Indexer::forced_import(&outputs_dir)?;
@@ -52,11 +49,11 @@ pub fn main() -> color_eyre::Result<()> {
loop { loop {
let i = Instant::now(); let i = Instant::now();
let starting_indexes = indexer.checked_index(&blocks, &client, &exit)?; let starting_indexes = indexer.checked_index(&reader, &client, &exit)?;
Mimalloc::collect(); Mimalloc::collect();
computer.compute(&indexer, starting_indexes, &reader, &exit)?; computer.compute(&indexer, starting_indexes, &exit)?;
dbg!(i.elapsed()); dbg!(i.elapsed());
sleep(Duration::from_secs(10)); sleep(Duration::from_secs(10));
} }

View File

@@ -5,7 +5,6 @@ use brk_bencher::Bencher;
use brk_computer::Computer; use brk_computer::Computer;
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use tracing::{debug, info}; use tracing::{debug, info};
@@ -28,8 +27,6 @@ pub fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut computer = Computer::forced_import(&outputs_benches_dir, &indexer)?; let mut computer = Computer::forced_import(&outputs_benches_dir, &indexer)?;
@@ -47,13 +44,13 @@ pub fn main() -> Result<()> {
}); });
let i = Instant::now(); let i = Instant::now();
let starting_indexes = indexer.index(&blocks, &client, &exit)?; let starting_indexes = indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
Mimalloc::collect(); Mimalloc::collect();
let i = Instant::now(); let i = Instant::now();
computer.compute(&indexer, starting_indexes, &reader, &exit)?; computer.compute(&indexer, starting_indexes, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
// We want to benchmark the drop too // We want to benchmark the drop too

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher; use brk_bencher::Bencher;
use brk_computer::Computer; use brk_computer::Computer;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use tracing::{debug, info}; use tracing::{debug, info};
@@ -45,15 +44,13 @@ pub fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
// Pre-run indexer if too far behind, then drop and reimport to reduce memory // Pre-run indexer if too far behind, then drop and reimport to reduce memory
let chain_height = client.get_last_height()?; let chain_height = client.get_last_height()?;
let indexed_height = indexer.vecs.starting_height(); let indexed_height = indexer.vecs.starting_height();
if chain_height.saturating_sub(*indexed_height) > 1000 { if chain_height.saturating_sub(*indexed_height) > 1000 {
indexer.index(&blocks, &client, &exit)?; indexer.index(&reader, &client, &exit)?;
drop(indexer); drop(indexer);
Mimalloc::collect(); Mimalloc::collect();
indexer = Indexer::forced_import(&outputs_dir)?; indexer = Indexer::forced_import(&outputs_dir)?;
@@ -63,13 +60,13 @@ pub fn main() -> color_eyre::Result<()> {
loop { loop {
let i = Instant::now(); let i = Instant::now();
let starting_indexes = indexer.index(&blocks, &client, &exit)?; let starting_indexes = indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
Mimalloc::collect(); Mimalloc::collect();
let i = Instant::now(); let i = Instant::now();
computer.compute(&indexer, starting_indexes, &reader, &exit)?; computer.compute(&indexer, starting_indexes, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
sleep(Duration::from_secs(60)); sleep(Duration::from_secs(60));

View File

@@ -7,7 +7,7 @@ use brk_types::{
use rayon::prelude::*; use rayon::prelude::*;
use rustc_hash::FxHashSet; use rustc_hash::FxHashSet;
use tracing::{debug, info}; use tracing::{debug, info};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec}; use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unlikely};
use crate::{ use crate::{
distribution::{ distribution::{
@@ -243,7 +243,11 @@ pub(crate) fn process_blocks(
for height in starting_height.to_usize()..=last_height.to_usize() { for height in starting_height.to_usize()..=last_height.to_usize() {
let height = Height::from(height); let height = Height::from(height);
info!("Processing chain at {}...", height); if unlikely(height.is_multiple_of(100)) {
info!("Processing chain at {}...", height);
} else {
debug!("Processing chain at {}...", height);
}
// Get block metadata from pre-collected vecs // Get block metadata from pre-collected vecs
let offset = height.to_usize() - start_usize; let offset = height.to_usize() - start_usize;

View File

@@ -4,7 +4,6 @@ use std::{fs, path::Path, thread, time::Instant};
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_reader::Reader;
use brk_traversable::Traversable; use brk_traversable::Traversable;
use brk_types::Version; use brk_types::Version;
use tracing::info; use tracing::info;
@@ -23,7 +22,6 @@ mod market;
mod mining; mod mining;
mod outputs; mod outputs;
mod pools; mod pools;
mod positions;
pub mod prices; pub mod prices;
mod scripts; mod scripts;
mod supply; mod supply;
@@ -35,7 +33,6 @@ pub struct Computer<M: StorageMode = Rw> {
pub mining: Box<mining::Vecs<M>>, pub mining: Box<mining::Vecs<M>>,
pub transactions: Box<transactions::Vecs<M>>, pub transactions: Box<transactions::Vecs<M>>,
pub scripts: Box<scripts::Vecs<M>>, pub scripts: Box<scripts::Vecs<M>>,
pub positions: Box<positions::Vecs<M>>,
pub cointime: Box<cointime::Vecs<M>>, pub cointime: Box<cointime::Vecs<M>>,
pub constants: Box<constants::Vecs>, pub constants: Box<constants::Vecs>,
pub indexes: Box<indexes::Vecs<M>>, pub indexes: Box<indexes::Vecs<M>>,
@@ -63,24 +60,12 @@ impl Computer {
const STACK_SIZE: usize = 8 * 1024 * 1024; const STACK_SIZE: usize = 8 * 1024 * 1024;
let big_thread = || thread::Builder::new().stack_size(STACK_SIZE); let big_thread = || thread::Builder::new().stack_size(STACK_SIZE);
let (indexes, positions) = timed("Imported indexes/positions", || { let indexes = timed("Imported indexes", || -> Result<_> {
thread::scope(|s| -> Result<_> { Ok(Box::new(indexes::Vecs::forced_import(
let positions_handle = big_thread().spawn_scoped(s, || -> Result<_> { &computed_path,
Ok(Box::new(positions::Vecs::forced_import( VERSION,
&computed_path, indexer,
VERSION, )?))
)?))
})?;
let indexes = Box::new(indexes::Vecs::forced_import(
&computed_path,
VERSION,
indexer,
)?);
let positions = positions_handle.join().unwrap()?;
Ok((indexes, positions))
})
})?; })?;
let (constants, prices) = timed("Imported prices/constants", || -> Result<_> { let (constants, prices) = timed("Imported prices/constants", || -> Result<_> {
@@ -257,7 +242,6 @@ impl Computer {
market, market,
distribution, distribution,
supply, supply,
positions,
pools, pools,
cointime, cointime,
indexes, indexes,
@@ -278,7 +262,6 @@ impl Computer {
mining::DB_NAME, mining::DB_NAME,
transactions::DB_NAME, transactions::DB_NAME,
scripts::DB_NAME, scripts::DB_NAME,
positions::DB_NAME,
cointime::DB_NAME, cointime::DB_NAME,
indicators::DB_NAME, indicators::DB_NAME,
indexes::DB_NAME, indexes::DB_NAME,
@@ -319,7 +302,6 @@ impl Computer {
&mut self, &mut self,
indexer: &Indexer, indexer: &Indexer,
starting_indexes: brk_indexer::Indexes, starting_indexes: brk_indexer::Indexes,
reader: &Reader,
exit: &Exit, exit: &Exit,
) -> Result<()> { ) -> Result<()> {
internal::cache_clear_all(); internal::cache_clear_all();
@@ -387,13 +369,6 @@ impl Computer {
) )
})?; })?;
let positions = scope.spawn(|| {
timed("Computed positions", || {
self.positions
.compute(indexer, &starting_indexes, reader, exit)
})
});
timed("Computed transactions", || { timed("Computed transactions", || {
self.transactions.compute( self.transactions.compute(
indexer, indexer,
@@ -419,7 +394,6 @@ impl Computer {
) )
})?; })?;
positions.join().unwrap()?;
market.join().unwrap()?; market.join().unwrap()?;
Ok(()) Ok(())
})?; })?;
@@ -561,7 +535,6 @@ impl_iter_named!(
mining, mining,
transactions, transactions,
scripts, scripts,
positions,
cointime, cointime,
constants, constants,
indicators, indicators,

View File

@@ -25,7 +25,7 @@ impl Vecs {
indexer, indexer,
indexes, indexes,
&blocks.lookback, &blocks.lookback,
&transactions.fees, transactions,
prices, prices,
starting_indexes, starting_indexes,
exit, exit,

View File

@@ -17,7 +17,7 @@ impl Vecs {
indexer: &Indexer, indexer: &Indexer,
indexes: &indexes::Vecs, indexes: &indexes::Vecs,
lookback: &blocks::LookbackVecs, lookback: &blocks::LookbackVecs,
transactions_fees: &transactions::FeesVecs, transactions: &transactions::Vecs,
prices: &prices::Vecs, prices: &prices::Vecs,
starting_indexes: &Indexes, starting_indexes: &Indexes,
exit: &Exit, exit: &Exit,
@@ -67,7 +67,7 @@ impl Vecs {
starting_indexes.height, starting_indexes.height,
&indexer.vecs.transactions.first_tx_index, &indexer.vecs.transactions.first_tx_index,
&indexes.height.tx_index_count, &indexes.height.tx_index_count,
&transactions_fees.fee.tx_index, &transactions.fees.fee.tx_index,
exit, exit,
)?; )?;
Ok(()) Ok(())
@@ -95,6 +95,13 @@ impl Vecs {
self.subsidy self.subsidy
.compute_rest(starting_indexes.height, prices, exit)?; .compute_rest(starting_indexes.height, prices, exit)?;
self.output_volume.compute_subtract(
starting_indexes.height,
&transactions.volume.transfer_volume.block.sats,
&self.fees.block.sats,
exit,
)?;
self.unclaimed.block.sats.compute_transform( self.unclaimed.block.sats.compute_transform(
starting_indexes.height, starting_indexes.height,
&self.subsidy.block.sats, &self.subsidy.block.sats,

View File

@@ -1,6 +1,6 @@
use brk_error::Result; use brk_error::Result;
use brk_types::Version; use brk_types::Version;
use vecdb::Database; use vecdb::{Database, EagerVec, ImportableVec};
use super::Vecs; use super::Vecs;
use crate::{ use crate::{
@@ -44,6 +44,7 @@ impl Vecs {
cached_starts, cached_starts,
)?, )?,
fees: AmountPerBlockFull::forced_import(db, "fees", version, indexes, cached_starts)?, fees: AmountPerBlockFull::forced_import(db, "fees", version, indexes, cached_starts)?,
output_volume: EagerVec::forced_import(db, "output_volume", version)?,
unclaimed: AmountPerBlockCumulative::forced_import( unclaimed: AmountPerBlockCumulative::forced_import(
db, db,
"unclaimed_rewards", "unclaimed_rewards",

View File

@@ -1,6 +1,6 @@
use brk_traversable::Traversable; use brk_traversable::Traversable;
use brk_types::{BasisPoints16, BasisPoints32}; use brk_types::{BasisPoints16, BasisPoints32, Height, Sats};
use vecdb::{Rw, StorageMode}; use vecdb::{EagerVec, PcoVec, Rw, StorageMode};
use crate::internal::{ use crate::internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull, AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
@@ -12,6 +12,7 @@ pub struct Vecs<M: StorageMode = Rw> {
pub coinbase: AmountPerBlockCumulativeRolling<M>, pub coinbase: AmountPerBlockCumulativeRolling<M>,
pub subsidy: AmountPerBlockCumulativeRolling<M>, pub subsidy: AmountPerBlockCumulativeRolling<M>,
pub fees: AmountPerBlockFull<M>, pub fees: AmountPerBlockFull<M>,
pub output_volume: M::Stored<EagerVec<PcoVec<Height, Sats>>>,
pub unclaimed: AmountPerBlockCumulative<M>, pub unclaimed: AmountPerBlockCumulative<M>,
#[traversable(wrap = "fees", rename = "dominance")] #[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentPerBlock<BasisPoints16, M>, pub fee_dominance: PercentPerBlock<BasisPoints16, M>,

View File

@@ -2,7 +2,6 @@ use std::{collections::BTreeMap, path::Path};
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_store::AnyStore;
use brk_traversable::Traversable; use brk_traversable::Traversable;
use brk_types::{Addr, AddrBytes, Height, Indexes, OutputType, PoolSlug, Pools, TxOutIndex, pools}; use brk_types::{Addr, AddrBytes, Height, Indexes, OutputType, PoolSlug, Pools, TxOutIndex, pools};
use rayon::prelude::*; use rayon::prelude::*;
@@ -114,8 +113,18 @@ impl Vecs {
starting_indexes: &Indexes, starting_indexes: &Indexes,
exit: &Exit, exit: &Exit,
) -> Result<()> { ) -> Result<()> {
let dep_version = indexer.vecs.blocks.coinbase_tag.version();
let pool_vec_version = self.pool.header().vec_version();
let pool_computed = self.pool.header().computed_version();
let expected = pool_vec_version + dep_version;
if expected != pool_computed {
tracing::warn!(
"Pool version mismatch: vec_version={pool_vec_version:?} + dep={dep_version:?} = {expected:?}, stored computed={pool_computed:?}, len={}",
self.pool.len()
);
}
self.pool self.pool
.validate_computed_version_or_reset(indexer.stores.height_to_coinbase_tag.version())?; .validate_computed_version_or_reset(dep_version)?;
let first_txout_index = indexer.vecs.transactions.first_txout_index.reader(); let first_txout_index = indexer.vecs.transactions.first_txout_index.reader();
let output_type = indexer.vecs.outputs.output_type.reader(); let output_type = indexer.vecs.outputs.output_type.reader();
@@ -142,12 +151,12 @@ impl Vecs {
self.pool.truncate_if_needed_at(min)?; self.pool.truncate_if_needed_at(min)?;
indexer let len = indexer.vecs.blocks.coinbase_tag.len();
.stores
.height_to_coinbase_tag indexer.vecs.blocks.coinbase_tag.try_for_each_range_at(
.iter() min,
.skip(min) len,
.try_for_each(|(_, coinbase_tag)| -> Result<()> { |coinbase_tag| -> Result<()> {
let tx_index = first_tx_index_cursor.next().unwrap(); let tx_index = first_tx_index_cursor.next().unwrap();
let out_start = first_txout_index.get(tx_index.to_usize()); let out_start = first_txout_index.get(tx_index.to_usize());
@@ -174,12 +183,13 @@ impl Vecs {
.map(|bytes| Addr::try_from(&bytes).unwrap()) .map(|bytes| Addr::try_from(&bytes).unwrap())
.and_then(|addr| self.pools.find_from_addr(&addr)) .and_then(|addr| self.pools.find_from_addr(&addr))
}) })
.or_else(|| self.pools.find_from_coinbase_tag(&coinbase_tag)) .or_else(|| self.pools.find_from_coinbase_tag(&coinbase_tag.as_str()))
.unwrap_or(unknown); .unwrap_or(unknown);
self.pool.push(pool.slug); self.pool.push(pool.slug);
Ok(()) Ok(())
})?; },
)?;
let _lock = exit.lock(); let _lock = exit.lock();
self.pool.write()?; self.pool.write()?;

View File

@@ -1,147 +0,0 @@
use std::{fs, path::Path};
use brk_error::Result;
use brk_indexer::Indexer;
use brk_reader::{Reader, XOR_LEN, XORBytes};
use brk_traversable::Traversable;
use brk_types::{BlkPosition, Height, Indexes, TxIndex, Version};
use tracing::info;
use vecdb::{
AnyStoredVec, AnyVec, Database, Exit, ImportableVec, PcoVec, ReadableVec, Rw, StorageMode,
WritableVec,
};
use crate::internal::db_utils::{finalize_db, open_db};
pub const DB_NAME: &str = "positions";
/// Persisted byte positions ("positions" database) of blocks and transactions
/// inside the node's raw block files, as read back by the `Reader`.
#[derive(Traversable)]
#[traversable(hidden)]
pub struct Vecs<M: StorageMode = Rw> {
    // Backing database handle; opened in `forced_import` and compacted in `compute`.
    db: Database,
    // Per-height position of each block (filled from `block.metadata().position()`).
    pub block: M::Stored<PcoVec<Height, BlkPosition>>,
    // Per-tx-index position of each transaction (filled from tx metadata positions).
    pub tx: M::Stored<PcoVec<TxIndex, BlkPosition>>,
}
impl Vecs {
    /// Opens (or creates) the "positions" database under `parent_path` and
    /// imports the two position vecs.
    pub(crate) fn forced_import(parent_path: &Path, parent_version: Version) -> Result<Self> {
        let db = open_db(parent_path, DB_NAME, 1_000_000)?;
        let version = parent_version;
        let this = Self {
            // NOTE(review): both vecs are imported under the same name "position"
            // in the same database — presumably they are disambiguated elsewhere
            // (e.g. by key type); confirm this does not collide.
            block: PcoVec::forced_import(&db, "position", version + Version::TWO)?,
            tx: PcoVec::forced_import(&db, "position", version + Version::TWO)?,
            db,
        };
        finalize_db(&this.db, &this)?;
        Ok(this)
    }
    /// Syncs pending background DB tasks, runs the actual computation, then
    /// schedules a deferred compaction in the background (guarded by `exit`).
    pub(crate) fn compute(
        &mut self,
        indexer: &Indexer,
        starting_indexes: &Indexes,
        reader: &Reader,
        exit: &Exit,
    ) -> Result<()> {
        self.db.sync_bg_tasks()?;
        self.compute_(indexer, starting_indexes, reader, exit)?;
        // Clone so the background closure owns its own exit guard.
        let exit = exit.clone();
        self.db.run_bg(move |db| {
            let _lock = exit.lock();
            db.compact_deferred_default()
        });
        Ok(())
    }
    /// Compares the reader's current XOR bytes against the cached copy in
    /// `xor.dat`; if they changed, all stored positions are stale and both vecs
    /// are reset. The new XOR bytes are cached either way (unless unchanged).
    fn check_xor_bytes(&mut self, reader: &Reader) -> Result<()> {
        let xor_path = self.db.path().join("xor.dat");
        let current = reader.xor_bytes();
        // A missing/short/corrupt cache file is treated as "no cached value".
        let cached = fs::read(&xor_path)
            .ok()
            .and_then(|b| <[u8; XOR_LEN]>::try_from(b).ok())
            .map(XORBytes::from);
        match cached {
            // Unchanged: nothing to do, keep the cache file as-is.
            Some(c) if c == current => return Ok(()),
            Some(_) => {
                info!("XOR bytes changed, resetting positions...");
                self.block.reset()?;
                self.tx.reset()?;
            }
            None => {}
        }
        fs::write(&xor_path, *current)?;
        Ok(())
    }
    /// Core computation: re-reads blocks from `parser` starting at the minimum
    /// unprocessed height and records the block/tx positions, flushing every
    /// 1_000 blocks and once more at the end.
    fn compute_(
        &mut self,
        indexer: &Indexer,
        starting_indexes: &Indexes,
        parser: &Reader,
        exit: &Exit,
    ) -> Result<()> {
        self.check_xor_bytes(parser)?;
        // Validate computed versions against dependencies
        let dep_version = indexer.vecs.transactions.first_tx_index.version()
            + indexer.vecs.transactions.height.version();
        self.block.validate_computed_version_or_reset(dep_version)?;
        self.tx.validate_computed_version_or_reset(dep_version)?;
        // Resume point: the earlier of what we already stored and the caller's
        // requested starting tx index.
        let min_tx_index = TxIndex::from(self.tx.len()).min(starting_indexes.tx_index);
        // No height for that tx index means there is nothing to (re)compute.
        let Some(min_height) = indexer
            .vecs
            .transactions
            .height
            .collect_one(min_tx_index)
            .map(|h: Height| h.min(starting_indexes.height))
        else {
            return Ok(());
        };
        let first_tx_at_min_height = indexer
            .vecs
            .transactions
            .first_tx_index
            .collect_one(min_height)
            .unwrap();
        // Drop any data at/after the resume point so pushes below line up.
        self.block.truncate_if_needed(min_height)?;
        self.tx.truncate_if_needed(first_tx_at_min_height)?;
        parser
            .read(
                Some(min_height),
                Some((indexer.vecs.transactions.first_tx_index.len() - 1).into()),
            )
            .iter()
            .try_for_each(|block| -> Result<()> {
                self.block.push(block.metadata().position());
                block.tx_metadata().iter().for_each(|metadata| {
                    self.tx.push(metadata.position());
                });
                // Periodic flush, serialized against shutdown via the exit lock.
                if *block.height() % 1_000 == 0 {
                    let _lock = exit.lock();
                    self.block.write()?;
                    self.tx.write()?;
                }
                Ok(())
            })?;
        // Final flush for the tail that didn't hit the periodic threshold.
        let _lock = exit.lock();
        self.block.write()?;
        self.tx.write()?;
        Ok(())
    }
}

View File

@@ -1,6 +1,6 @@
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_types::{FeeRate, Indexes, Sats}; use brk_types::{FeeRate, Indexes, OutPoint, Sats, TxInIndex, VSize};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unlikely}; use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unlikely};
use super::super::size; use super::super::size;
@@ -33,26 +33,47 @@ impl Vecs {
exit, exit,
)?; )?;
self.compute_fee_and_fee_rate(size_vecs, starting_indexes, exit)?; self.compute_fees(indexer, indexes, size_vecs, starting_indexes, exit)?;
let (r3, r4) = rayon::join( let (r1, (r2, r3)) = rayon::join(
|| { || {
self.fee self.fee
.derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1) .derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1)
}, },
|| { || {
self.fee_rate rayon::join(
.derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1) || {
self.fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
|| {
self.effective_fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
)
}, },
); );
r1?;
r2?;
r3?; r3?;
r4?;
Ok(()) Ok(())
} }
fn compute_fee_and_fee_rate( fn compute_fees(
&mut self, &mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
size_vecs: &size::Vecs, size_vecs: &size::Vecs,
starting_indexes: &Indexes, starting_indexes: &Indexes,
exit: &Exit, exit: &Exit,
@@ -67,6 +88,9 @@ impl Vecs {
self.fee_rate self.fee_rate
.tx_index .tx_index
.validate_computed_version_or_reset(dep_version)?; .validate_computed_version_or_reset(dep_version)?;
self.effective_fee_rate
.tx_index
.validate_computed_version_or_reset(dep_version)?;
let target = self let target = self
.input_value .input_value
@@ -78,6 +102,7 @@ impl Vecs {
.tx_index .tx_index
.len() .len()
.min(self.fee_rate.tx_index.len()) .min(self.fee_rate.tx_index.len())
.min(self.effective_fee_rate.tx_index.len())
.min(starting_indexes.tx_index.to_usize()); .min(starting_indexes.tx_index.to_usize());
if min >= target { if min >= target {
@@ -90,39 +115,171 @@ impl Vecs {
self.fee_rate self.fee_rate
.tx_index .tx_index
.truncate_if_needed(starting_indexes.tx_index)?; .truncate_if_needed(starting_indexes.tx_index)?;
self.effective_fee_rate
.tx_index
.truncate_if_needed(starting_indexes.tx_index)?;
loop { let start_tx = self.fee.tx_index.len();
let skip = self.fee.tx_index.len(); let max_height = indexer.vecs.transactions.first_tx_index.len();
let end = self.fee.tx_index.batch_end(target);
if skip >= end { let start_height = if start_tx == 0 {
0
} else {
indexer
.vecs
.transactions
.height
.collect_one_at(start_tx)
.unwrap()
.to_usize()
};
for h in start_height..max_height {
let first_tx: usize = indexer
.vecs
.transactions
.first_tx_index
.collect_one_at(h)
.unwrap()
.to_usize();
let n = *indexes.height.tx_index_count.collect_one_at(h).unwrap() as usize;
if first_tx + n > target {
break; break;
} }
let input_batch = self.input_value.collect_range_at(skip, end); // Batch read all per-tx data for this block
let output_batch = self.output_value.collect_range_at(skip, end); let input_values = self.input_value.collect_range_at(first_tx, first_tx + n);
let vsize_batch = size_vecs.vsize.tx_index.collect_range_at(skip, end); let output_values = self.output_value.collect_range_at(first_tx, first_tx + n);
let vsizes: Vec<VSize> = size_vecs
.vsize
.tx_index
.collect_range_at(first_tx, first_tx + n);
let txin_starts: Vec<TxInIndex> = indexer
.vecs
.transactions
.first_txin_index
.collect_range_at(first_tx, first_tx + n);
let input_begin = txin_starts[0].to_usize();
let input_end = if h + 1 < max_height {
indexer
.vecs
.inputs
.first_txin_index
.collect_one_at(h + 1)
.unwrap()
.to_usize()
} else {
indexer.vecs.inputs.outpoint.len()
};
let outpoints: Vec<OutPoint> = indexer
.vecs
.inputs
.outpoint
.collect_range_at(input_begin, input_end);
for j in 0..input_batch.len() { // Compute fee + fee_rate per tx
let fee = if unlikely(input_batch[j].is_max()) { let mut fees = Vec::with_capacity(n);
for j in 0..n {
let fee = if unlikely(input_values[j].is_max()) {
Sats::ZERO Sats::ZERO
} else { } else {
input_batch[j] - output_batch[j] input_values[j] - output_values[j]
}; };
self.fee.tx_index.push(fee); self.fee.tx_index.push(fee);
self.fee_rate self.fee_rate.tx_index.push(FeeRate::from((fee, vsizes[j])));
.tx_index fees.push(fee);
.push(FeeRate::from((fee, vsize_batch[j])));
} }
let _lock = exit.lock(); // Effective fee rate via same-block CPFP clustering
let (r1, r2) = rayon::join( let effective = cluster_fee_rates(
|| self.fee.tx_index.write(), &txin_starts,
|| self.fee_rate.tx_index.write(), &outpoints,
input_begin,
first_tx,
&fees,
&vsizes,
); );
r1?; for rate in effective {
r2?; self.effective_fee_rate.tx_index.push(rate);
}
if h % 1_000 == 0 {
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.effective_fee_rate.tx_index.write()?;
}
} }
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.effective_fee_rate.tx_index.write()?;
Ok(()) Ok(())
} }
} }
/// Clusters same-block parent-child txs and computes effective fee rate per cluster.
///
/// `txin_starts`, `fees` and `vsizes` are per-tx slices for one block (index 0 is
/// the coinbase); `outpoints` covers that block's inputs, offset by `outpoint_base`.
fn cluster_fee_rates(
    txin_starts: &[TxInIndex],
    outpoints: &[OutPoint],
    outpoint_base: usize,
    first_tx: usize,
    fees: &[Sats],
    vsizes: &[VSize],
) -> Vec<FeeRate> {
    let n = fees.len();
    // Union-find forest over the block's txs; every tx starts as its own root.
    let mut parent: Vec<usize> = (0..n).collect();

    // Union each non-coinbase tx with any same-block tx it spends from (CPFP).
    for j in 1..n {
        let begin = txin_starts[j].to_usize() - outpoint_base;
        let end = txin_starts
            .get(j + 1)
            .map(|s| s.to_usize() - outpoint_base)
            .unwrap_or(outpoints.len());
        for op in &outpoints[begin..end] {
            if op.is_coinbase() {
                continue;
            }
            let spent_tx = op.tx_index().to_usize();
            if (first_tx..first_tx + n).contains(&spent_tx) {
                union(&mut parent, j, spent_tx - first_tx);
            }
        }
    }

    // Accumulate each cluster's total fee and total vsize at its root.
    let mut total_fee = vec![Sats::ZERO; n];
    let mut total_vsize = vec![VSize::from(0u64); n];
    for j in 0..n {
        let root = find(&mut parent, j);
        total_fee[root] += fees[j];
        total_vsize[root] += vsizes[j];
    }

    // Every member of a cluster gets the cluster-wide (fee, vsize) rate.
    (0..n)
        .map(|j| {
            let root = find(&mut parent, j);
            FeeRate::from((total_fee[root], total_vsize[root]))
        })
        .collect()
}
/// Returns the root of `i`'s set, halving the path as it walks up
/// (each visited node is re-pointed at its grandparent).
fn find(parent: &mut [usize], mut i: usize) -> usize {
    loop {
        let p = parent[i];
        if p == i {
            return i;
        }
        // Path halving: skip a level, then continue from the grandparent.
        let gp = parent[p];
        parent[i] = gp;
        i = gp;
    }
}
/// Merges the sets containing `a` and `b` by attaching `a`'s root under `b`'s.
/// No-op when they already share a root.
fn union(parent: &mut [usize], a: usize, b: usize) {
    let root_a = find(parent, a);
    let root_b = find(parent, b);
    if root_a == root_b {
        return;
    }
    parent[root_a] = root_b;
}

View File

@@ -20,6 +20,12 @@ impl Vecs {
output_value: EagerVec::forced_import(db, "output_value", version)?, output_value: EagerVec::forced_import(db, "output_value", version)?,
fee: PerTxDistribution::forced_import(db, "fee", v, indexes)?, fee: PerTxDistribution::forced_import(db, "fee", v, indexes)?,
fee_rate: PerTxDistribution::forced_import(db, "fee_rate", v, indexes)?, fee_rate: PerTxDistribution::forced_import(db, "fee_rate", v, indexes)?,
effective_fee_rate: PerTxDistribution::forced_import(
db,
"effective_fee_rate",
v,
indexes,
)?,
}) })
} }
} }

View File

@@ -10,4 +10,5 @@ pub struct Vecs<M: StorageMode = Rw> {
pub output_value: M::Stored<EagerVec<PcoVec<TxIndex, Sats>>>, pub output_value: M::Stored<EagerVec<PcoVec<TxIndex, Sats>>>,
pub fee: PerTxDistribution<Sats, M>, pub fee: PerTxDistribution<Sats, M>,
pub fee_rate: PerTxDistribution<FeeRate, M>, pub fee_rate: PerTxDistribution<FeeRate, M>,
pub effective_fee_rate: PerTxDistribution<FeeRate, M>,
} }

View File

@@ -12,7 +12,6 @@ exclude = ["examples/"]
bitcoin = { workspace = true } bitcoin = { workspace = true }
brk_error = { workspace = true, features = ["fjall", "vecdb"] } brk_error = { workspace = true, features = ["fjall", "vecdb"] }
brk_cohort = { workspace = true } brk_cohort = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true } brk_logger = { workspace = true }
brk_reader = { workspace = true } brk_reader = { workspace = true }
brk_rpc = { workspace = true, features = ["corepc"] } brk_rpc = { workspace = true, features = ["corepc"] }

View File

@@ -7,7 +7,6 @@ use std::{
use brk_alloc::Mimalloc; use brk_alloc::Mimalloc;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use tracing::{debug, info}; use tracing::{debug, info};
@@ -33,9 +32,6 @@ fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
debug!("Reader created."); debug!("Reader created.");
let blocks = Blocks::new(&client, &reader);
debug!("Blocks created.");
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
debug!("Indexer imported."); debug!("Indexer imported.");
@@ -44,7 +40,7 @@ fn main() -> color_eyre::Result<()> {
loop { loop {
let i = Instant::now(); let i = Instant::now();
indexer.checked_index(&blocks, &client, &exit)?; indexer.checked_index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
Mimalloc::collect(); Mimalloc::collect();

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher; use brk_bencher::Bencher;
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use tracing::{debug, info}; use tracing::{debug, info};
@@ -33,8 +32,6 @@ fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut bencher = let mut bencher =
@@ -50,7 +47,7 @@ fn main() -> Result<()> {
}); });
let i = Instant::now(); let i = Instant::now();
indexer.index(&blocks, &client, &exit)?; indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
// We want to benchmark the drop too // We want to benchmark the drop too

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher; use brk_bencher::Bencher;
use brk_error::Result; use brk_error::Result;
use brk_indexer::Indexer; use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader; use brk_reader::Reader;
use brk_rpc::{Auth, Client}; use brk_rpc::{Auth, Client};
use tracing::{debug, info}; use tracing::{debug, info};
@@ -33,8 +32,6 @@ fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client); let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?; let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut bencher = let mut bencher =
@@ -51,7 +48,7 @@ fn main() -> Result<()> {
loop { loop {
let i = Instant::now(); let i = Instant::now();
indexer.index(&blocks, &client, &exit)?; indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed()); info!("Done in {:?}", i.elapsed());
Mimalloc::collect(); Mimalloc::collect();

View File

@@ -4,7 +4,7 @@ use brk_types::{TxIndex, Txid, TxidPrefix, Version};
// One version for all data sources // One version for all data sources
// Increment on **change _OR_ addition** // Increment on **change _OR_ addition**
pub const VERSION: Version = Version::new(25); pub const VERSION: Version = Version::new(26);
pub const SNAPSHOT_BLOCK_RANGE: usize = 1_000; pub const SNAPSHOT_BLOCK_RANGE: usize = 1_000;
/// Known duplicate Bitcoin transactions (BIP30) /// Known duplicate Bitcoin transactions (BIP30)

View File

@@ -8,12 +8,14 @@ use std::{
}; };
use brk_error::Result; use brk_error::Result;
use brk_iterator::Blocks; use brk_reader::Reader;
use brk_rpc::Client; use brk_rpc::Client;
use brk_types::Height; use brk_types::Height;
use fjall::PersistMode; use fjall::PersistMode;
use tracing::{debug, info}; use tracing::{debug, info};
use vecdb::{Exit, RawDBError, ReadOnlyClone, ReadableVec, Ro, Rw, StorageMode}; use vecdb::{
Exit, RawDBError, ReadOnlyClone, ReadableVec, Ro, Rw, StorageMode, WritableVec, unlikely,
};
mod constants; mod constants;
mod indexes; mod indexes;
mod processor; mod processor;
@@ -93,22 +95,22 @@ impl Indexer {
} }
} }
pub fn index(&mut self, blocks: &Blocks, client: &Client, exit: &Exit) -> Result<Indexes> { pub fn index(&mut self, reader: &Reader, client: &Client, exit: &Exit) -> Result<Indexes> {
self.index_(blocks, client, exit, false) self.index_(reader, client, exit, false)
} }
pub fn checked_index( pub fn checked_index(
&mut self, &mut self,
blocks: &Blocks, reader: &Reader,
client: &Client, client: &Client,
exit: &Exit, exit: &Exit,
) -> Result<Indexes> { ) -> Result<Indexes> {
self.index_(blocks, client, exit, true) self.index_(reader, client, exit, true)
} }
fn index_( fn index_(
&mut self, &mut self,
blocks: &Blocks, reader: &Reader,
client: &Client, client: &Client,
exit: &Exit, exit: &Exit,
check_collisions: bool, check_collisions: bool,
@@ -172,13 +174,13 @@ impl Indexer {
let stores_res = s.spawn(|| -> Result<()> { let stores_res = s.spawn(|| -> Result<()> {
let i = Instant::now(); let i = Instant::now();
stores.commit(height)?; stores.commit(height)?;
info!("Stores exported in {:?}", i.elapsed()); debug!("Stores exported in {:?}", i.elapsed());
Ok(()) Ok(())
}); });
let vecs_res = s.spawn(|| -> Result<()> { let vecs_res = s.spawn(|| -> Result<()> {
let i = Instant::now(); let i = Instant::now();
vecs.flush(height)?; vecs.flush(height)?;
info!("Vecs exported in {:?}", i.elapsed()); debug!("Vecs exported in {:?}", i.elapsed());
Ok(()) Ok(())
}); });
stores_res.join().unwrap()?; stores_res.join().unwrap()?;
@@ -195,13 +197,22 @@ impl Indexer {
let vecs = &mut self.vecs; let vecs = &mut self.vecs;
let stores = &mut self.stores; let stores = &mut self.stores;
for block in blocks.after(prev_hash)? { for block in reader.after(prev_hash)?.iter() {
let height = block.height(); let height = block.height();
info!("Indexing block {height}..."); if unlikely(height.is_multiple_of(100)) {
info!("Indexing block {height}...");
} else {
debug!("Indexing block {height}...");
}
indexes.height = height; indexes.height = height;
vecs.blocks.position.push(block.metadata().position());
block.tx_metadata().iter().for_each(|m| {
vecs.transactions.position.push(m.position());
});
let mut processor = BlockProcessor { let mut processor = BlockProcessor {
block: &block, block: &block,
height, height,
@@ -271,13 +282,13 @@ impl Indexer {
for task in tasks { for task in tasks {
task().map_err(vecdb::RawDBError::other)?; task().map_err(vecdb::RawDBError::other)?;
} }
info!("Stores committed in {:?}", i.elapsed()); debug!("Stores committed in {:?}", i.elapsed());
let i = Instant::now(); let i = Instant::now();
fjall_db fjall_db
.persist(PersistMode::SyncData) .persist(PersistMode::SyncData)
.map_err(RawDBError::other)?; .map_err(RawDBError::other)?;
info!("Stores persisted in {:?}", i.elapsed()); debug!("Stores persisted in {:?}", i.elapsed());
} }
db.compact()?; db.compact()?;

View File

@@ -28,14 +28,14 @@ impl BlockProcessor<'_> {
.blockhash_prefix_to_height .blockhash_prefix_to_height
.insert(blockhash_prefix, height); .insert(blockhash_prefix, height);
self.stores
.height_to_coinbase_tag
.insert(height, self.block.coinbase_tag().into());
self.vecs self.vecs
.blocks .blocks
.blockhash .blockhash
.checked_push(height, blockhash.clone())?; .checked_push(height, blockhash.clone())?;
self.vecs
.blocks
.coinbase_tag
.checked_push(height, self.block.coinbase_tag())?;
self.vecs self.vecs
.blocks .blocks
.difficulty .difficulty
@@ -53,21 +53,28 @@ impl BlockProcessor<'_> {
pub fn push_block_size_and_weight(&mut self, txs: &[ComputedTx]) -> Result<()> { pub fn push_block_size_and_weight(&mut self, txs: &[ComputedTx]) -> Result<()> {
let overhead = bitcoin::block::Header::SIZE + bitcoin::VarInt::from(txs.len()).size(); let overhead = bitcoin::block::Header::SIZE + bitcoin::VarInt::from(txs.len()).size();
let mut total_size = overhead; let mut total_size = overhead;
let mut weight_wu = overhead * 4; let mut weight = overhead * 4;
for ct in txs { let mut sw_txs = 0u32;
let base = ct.base_size as usize; let mut sw_size = 0usize;
let total = ct.total_size as usize; let mut sw_weight = 0usize;
total_size += total;
weight_wu += base * 3 + total; for tx in txs {
total_size += tx.total_size as usize;
weight += tx.weight();
if tx.is_segwit() {
sw_txs += 1;
sw_size += tx.total_size as usize;
sw_weight += tx.weight();
}
} }
self.vecs
.blocks let h = self.height;
.total let blocks = &mut self.vecs.blocks;
.checked_push(self.height, total_size.into())?; blocks.total.checked_push(h, total_size.into())?;
self.vecs blocks.weight.checked_push(h, weight.into())?;
.blocks blocks.segwit_txs.checked_push(h, sw_txs.into())?;
.weight blocks.segwit_size.checked_push(h, sw_size.into())?;
.checked_push(self.height, weight_wu.into())?; blocks.segwit_weight.checked_push(h, sw_weight.into())?;
Ok(()) Ok(())
} }
} }

View File

@@ -48,6 +48,18 @@ pub struct ComputedTx<'a> {
pub total_size: u32, pub total_size: u32,
} }
impl ComputedTx<'_> {
#[inline]
pub fn is_segwit(&self) -> bool {
self.base_size != self.total_size
}
#[inline]
pub fn weight(&self) -> usize {
self.base_size as usize * 3 + self.total_size as usize
}
}
/// Reusable buffers cleared and refilled each block to avoid allocation churn. /// Reusable buffers cleared and refilled each block to avoid allocation churn.
#[derive(Default)] #[derive(Default)]
pub struct BlockBuffers { pub struct BlockBuffers {

View File

@@ -7,11 +7,11 @@ use brk_error::Result;
use brk_store::{AnyStore, Kind, Mode, Store}; use brk_store::{AnyStore, Kind, Mode, Store};
use brk_types::{ use brk_types::{
AddrHash, AddrIndexOutPoint, AddrIndexTxIndex, BlockHashPrefix, Height, OutPoint, OutputType, AddrHash, AddrIndexOutPoint, AddrIndexTxIndex, BlockHashPrefix, Height, OutPoint, OutputType,
StoredString, TxIndex, TxOutIndex, TxidPrefix, TypeIndex, Unit, Version, Vout, TxIndex, TxOutIndex, TxidPrefix, TypeIndex, Unit, Version, Vout,
}; };
use fjall::{Database, PersistMode}; use fjall::{Database, PersistMode};
use rayon::prelude::*; use rayon::prelude::*;
use tracing::info; use tracing::{debug, info};
use vecdb::{AnyVec, ReadableVec, VecIndex}; use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::{Indexes, constants::DUPLICATE_TXID_PREFIXES}; use crate::{Indexes, constants::DUPLICATE_TXID_PREFIXES};
@@ -26,7 +26,6 @@ pub struct Stores {
pub addr_type_to_addr_index_and_tx_index: ByAddrType<Store<AddrIndexTxIndex, Unit>>, pub addr_type_to_addr_index_and_tx_index: ByAddrType<Store<AddrIndexTxIndex, Unit>>,
pub addr_type_to_addr_index_and_unspent_outpoint: ByAddrType<Store<AddrIndexOutPoint, Unit>>, pub addr_type_to_addr_index_and_unspent_outpoint: ByAddrType<Store<AddrIndexOutPoint, Unit>>,
pub blockhash_prefix_to_height: Store<BlockHashPrefix, Height>, pub blockhash_prefix_to_height: Store<BlockHashPrefix, Height>,
pub height_to_coinbase_tag: Store<Height, StoredString>,
pub txid_prefix_to_tx_index: Store<TxidPrefix, TxIndex>, pub txid_prefix_to_tx_index: Store<TxidPrefix, TxIndex>,
} }
@@ -88,14 +87,6 @@ impl Stores {
Ok(Self { Ok(Self {
db: database.clone(), db: database.clone(),
height_to_coinbase_tag: Store::import(
database_ref,
path,
"height_to_coinbase_tag",
version,
Mode::PushOnly,
Kind::Sequential,
)?,
addr_type_to_addr_hash_to_addr_index: ByAddrType::new_with_index( addr_type_to_addr_hash_to_addr_index: ByAddrType::new_with_index(
create_addr_hash_to_addr_index_store, create_addr_hash_to_addr_index_store,
)?, )?,
@@ -135,7 +126,6 @@ impl Stores {
fn iter_any(&self) -> impl Iterator<Item = &dyn AnyStore> { fn iter_any(&self) -> impl Iterator<Item = &dyn AnyStore> {
[ [
&self.blockhash_prefix_to_height as &dyn AnyStore, &self.blockhash_prefix_to_height as &dyn AnyStore,
&self.height_to_coinbase_tag,
&self.txid_prefix_to_tx_index, &self.txid_prefix_to_tx_index,
] ]
.into_iter() .into_iter()
@@ -159,7 +149,6 @@ impl Stores {
fn par_iter_any_mut(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStore> { fn par_iter_any_mut(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStore> {
[ [
&mut self.blockhash_prefix_to_height as &mut dyn AnyStore, &mut self.blockhash_prefix_to_height as &mut dyn AnyStore,
&mut self.height_to_coinbase_tag,
&mut self.txid_prefix_to_tx_index, &mut self.txid_prefix_to_tx_index,
] ]
.into_par_iter() .into_par_iter()
@@ -184,11 +173,11 @@ impl Stores {
let i = Instant::now(); let i = Instant::now();
self.par_iter_any_mut() self.par_iter_any_mut()
.try_for_each(|store| store.commit(height))?; .try_for_each(|store| store.commit(height))?;
info!("Stores committed in {:?}", i.elapsed()); debug!("Stores committed in {:?}", i.elapsed());
let i = Instant::now(); let i = Instant::now();
self.db.persist(PersistMode::SyncData)?; self.db.persist(PersistMode::SyncData)?;
info!("Stores persisted in {:?}", i.elapsed()); debug!("Stores persisted in {:?}", i.elapsed());
Ok(()) Ok(())
} }
@@ -210,7 +199,6 @@ impl Stores {
} }
take!(self.blockhash_prefix_to_height); take!(self.blockhash_prefix_to_height);
take!(self.height_to_coinbase_tag);
take!(self.txid_prefix_to_tx_index); take!(self.txid_prefix_to_tx_index);
for store in self.addr_type_to_addr_hash_to_addr_index.values_mut() { for store in self.addr_type_to_addr_hash_to_addr_index.values_mut() {
@@ -257,7 +245,6 @@ impl Stores {
fn is_empty(&self) -> Result<bool> { fn is_empty(&self) -> Result<bool> {
Ok(self.blockhash_prefix_to_height.is_empty()? Ok(self.blockhash_prefix_to_height.is_empty()?
&& self.txid_prefix_to_tx_index.is_empty()? && self.txid_prefix_to_tx_index.is_empty()?
&& self.height_to_coinbase_tag.is_empty()?
&& self && self
.addr_type_to_addr_hash_to_addr_index .addr_type_to_addr_hash_to_addr_index
.values() .values()
@@ -286,12 +273,6 @@ impl Stores {
}, },
); );
(starting_indexes.height.to_usize()..vecs.blocks.blockhash.len())
.map(Height::from)
.for_each(|h| {
self.height_to_coinbase_tag.remove(h);
});
for addr_type in OutputType::ADDR_TYPES { for addr_type in OutputType::ADDR_TYPES {
for hash in vecs.iter_addr_hashes_from(addr_type, starting_indexes.height)? { for hash in vecs.iter_addr_hashes_from(addr_type, starting_indexes.height)? {
self.addr_type_to_addr_hash_to_addr_index self.addr_type_to_addr_hash_to_addr_index

View File

@@ -1,6 +1,9 @@
use brk_error::Result; use brk_error::Result;
use brk_traversable::Traversable; use brk_traversable::Traversable;
use brk_types::{BlockHash, Height, StoredF64, StoredU64, Timestamp, Version, Weight}; use brk_types::{
BlkPosition, BlockHash, CoinbaseTag, Height, StoredF64, StoredU32, StoredU64, Timestamp,
Version, Weight,
};
use rayon::prelude::*; use rayon::prelude::*;
use vecdb::{ use vecdb::{
AnyStoredVec, BytesVec, Database, ImportableVec, PcoVec, Rw, Stamp, StorageMode, WritableVec, AnyStoredVec, BytesVec, Database, ImportableVec, PcoVec, Rw, Stamp, StorageMode, WritableVec,
@@ -11,6 +14,7 @@ use crate::parallel_import;
#[derive(Traversable)] #[derive(Traversable)]
pub struct BlocksVecs<M: StorageMode = Rw> { pub struct BlocksVecs<M: StorageMode = Rw> {
pub blockhash: M::Stored<BytesVec<Height, BlockHash>>, pub blockhash: M::Stored<BytesVec<Height, BlockHash>>,
pub coinbase_tag: M::Stored<BytesVec<Height, CoinbaseTag>>,
#[traversable(wrap = "difficulty", rename = "value")] #[traversable(wrap = "difficulty", rename = "value")]
pub difficulty: M::Stored<PcoVec<Height, StoredF64>>, pub difficulty: M::Stored<PcoVec<Height, StoredF64>>,
/// Doesn't guarantee continuity due to possible reorgs and more generally the nature of mining /// Doesn't guarantee continuity due to possible reorgs and more generally the nature of mining
@@ -20,45 +24,85 @@ pub struct BlocksVecs<M: StorageMode = Rw> {
pub total: M::Stored<PcoVec<Height, StoredU64>>, pub total: M::Stored<PcoVec<Height, StoredU64>>,
#[traversable(wrap = "weight", rename = "base")] #[traversable(wrap = "weight", rename = "base")]
pub weight: M::Stored<PcoVec<Height, Weight>>, pub weight: M::Stored<PcoVec<Height, Weight>>,
#[traversable(hidden)]
pub position: M::Stored<PcoVec<Height, BlkPosition>>,
pub segwit_txs: M::Stored<PcoVec<Height, StoredU32>>,
pub segwit_size: M::Stored<PcoVec<Height, StoredU64>>,
pub segwit_weight: M::Stored<PcoVec<Height, Weight>>,
} }
impl BlocksVecs { impl BlocksVecs {
pub fn forced_import(db: &Database, version: Version) -> Result<Self> { pub fn forced_import(db: &Database, version: Version) -> Result<Self> {
let (blockhash, difficulty, timestamp, total, weight) = parallel_import! { let (
blockhash = BytesVec::forced_import(db, "blockhash", version),
difficulty = PcoVec::forced_import(db, "difficulty", version),
timestamp = PcoVec::forced_import(db, "timestamp", version),
total_size = PcoVec::forced_import(db, "total_size", version),
weight = PcoVec::forced_import(db, "block_weight", version),
};
Ok(Self {
blockhash, blockhash,
coinbase_tag,
difficulty, difficulty,
timestamp, timestamp,
total, total,
weight, weight,
position,
segwit_txs,
segwit_size,
segwit_weight,
) = parallel_import! {
blockhash = BytesVec::forced_import(db, "blockhash", version),
coinbase_tag = BytesVec::forced_import(db, "coinbase_tag", version),
difficulty = PcoVec::forced_import(db, "difficulty", version),
timestamp = PcoVec::forced_import(db, "timestamp", version),
total_size = PcoVec::forced_import(db, "total_size", version),
weight = PcoVec::forced_import(db, "block_weight", version),
position = PcoVec::forced_import(db, "block_position", version),
segwit_txs = PcoVec::forced_import(db, "segwit_txs", version),
segwit_size = PcoVec::forced_import(db, "segwit_size", version),
segwit_weight = PcoVec::forced_import(db, "segwit_weight", version),
};
Ok(Self {
blockhash,
coinbase_tag,
difficulty,
timestamp,
total,
weight,
position,
segwit_txs,
segwit_size,
segwit_weight,
}) })
} }
pub fn truncate(&mut self, height: Height, stamp: Stamp) -> Result<()> { pub fn truncate(&mut self, height: Height, stamp: Stamp) -> Result<()> {
self.blockhash self.blockhash
.truncate_if_needed_with_stamp(height, stamp)?; .truncate_if_needed_with_stamp(height, stamp)?;
self.coinbase_tag
.truncate_if_needed_with_stamp(height, stamp)?;
self.difficulty self.difficulty
.truncate_if_needed_with_stamp(height, stamp)?; .truncate_if_needed_with_stamp(height, stamp)?;
self.timestamp self.timestamp
.truncate_if_needed_with_stamp(height, stamp)?; .truncate_if_needed_with_stamp(height, stamp)?;
self.total.truncate_if_needed_with_stamp(height, stamp)?; self.total.truncate_if_needed_with_stamp(height, stamp)?;
self.weight.truncate_if_needed_with_stamp(height, stamp)?; self.weight.truncate_if_needed_with_stamp(height, stamp)?;
self.position.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_txs
.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_size
.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_weight
.truncate_if_needed_with_stamp(height, stamp)?;
Ok(()) Ok(())
} }
pub fn par_iter_mut_any(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> { pub fn par_iter_mut_any(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
[ [
&mut self.blockhash as &mut dyn AnyStoredVec, &mut self.blockhash as &mut dyn AnyStoredVec,
&mut self.coinbase_tag,
&mut self.difficulty, &mut self.difficulty,
&mut self.timestamp, &mut self.timestamp,
&mut self.total, &mut self.total,
&mut self.weight, &mut self.weight,
&mut self.position,
&mut self.segwit_txs,
&mut self.segwit_size,
&mut self.segwit_weight,
] ]
.into_par_iter() .into_par_iter()
} }

View File

@@ -1,8 +1,8 @@
use brk_error::Result; use brk_error::Result;
use brk_traversable::Traversable; use brk_traversable::Traversable;
use brk_types::{ use brk_types::{
Height, RawLockTime, StoredBool, StoredU32, TxInIndex, TxIndex, TxOutIndex, TxVersion, Txid, BlkPosition, Height, RawLockTime, StoredBool, StoredU32, TxInIndex, TxIndex, TxOutIndex,
Version, TxVersion, Txid, Version,
}; };
use rayon::prelude::*; use rayon::prelude::*;
use vecdb::{ use vecdb::{
@@ -23,6 +23,8 @@ pub struct TransactionsVecs<M: StorageMode = Rw> {
pub is_explicitly_rbf: M::Stored<PcoVec<TxIndex, StoredBool>>, pub is_explicitly_rbf: M::Stored<PcoVec<TxIndex, StoredBool>>,
pub first_txin_index: M::Stored<PcoVec<TxIndex, TxInIndex>>, pub first_txin_index: M::Stored<PcoVec<TxIndex, TxInIndex>>,
pub first_txout_index: M::Stored<BytesVec<TxIndex, TxOutIndex>>, pub first_txout_index: M::Stored<BytesVec<TxIndex, TxOutIndex>>,
#[traversable(hidden)]
pub position: M::Stored<PcoVec<TxIndex, BlkPosition>>,
} }
pub struct TxMetadataVecs<'a> { pub struct TxMetadataVecs<'a> {
@@ -70,6 +72,7 @@ impl TransactionsVecs {
is_explicitly_rbf, is_explicitly_rbf,
first_txin_index, first_txin_index,
first_txout_index, first_txout_index,
position,
) = parallel_import! { ) = parallel_import! {
first_tx_index = PcoVec::forced_import(db, "first_tx_index", version), first_tx_index = PcoVec::forced_import(db, "first_tx_index", version),
height = PcoVec::forced_import(db, "height", version), height = PcoVec::forced_import(db, "height", version),
@@ -81,6 +84,7 @@ impl TransactionsVecs {
is_explicitly_rbf = PcoVec::forced_import(db, "is_explicitly_rbf", version), is_explicitly_rbf = PcoVec::forced_import(db, "is_explicitly_rbf", version),
first_txin_index = PcoVec::forced_import(db, "first_txin_index", version), first_txin_index = PcoVec::forced_import(db, "first_txin_index", version),
first_txout_index = BytesVec::forced_import(db, "first_txout_index", version), first_txout_index = BytesVec::forced_import(db, "first_txout_index", version),
position = PcoVec::forced_import(db, "tx_position", version),
}; };
Ok(Self { Ok(Self {
first_tx_index, first_tx_index,
@@ -93,6 +97,7 @@ impl TransactionsVecs {
is_explicitly_rbf, is_explicitly_rbf,
first_txin_index, first_txin_index,
first_txout_index, first_txout_index,
position,
}) })
} }
@@ -115,6 +120,8 @@ impl TransactionsVecs {
.truncate_if_needed_with_stamp(tx_index, stamp)?; .truncate_if_needed_with_stamp(tx_index, stamp)?;
self.first_txout_index self.first_txout_index
.truncate_if_needed_with_stamp(tx_index, stamp)?; .truncate_if_needed_with_stamp(tx_index, stamp)?;
self.position
.truncate_if_needed_with_stamp(tx_index, stamp)?;
Ok(()) Ok(())
} }
@@ -130,6 +137,7 @@ impl TransactionsVecs {
&mut self.is_explicitly_rbf, &mut self.is_explicitly_rbf,
&mut self.first_txin_index, &mut self.first_txin_index,
&mut self.first_txout_index, &mut self.first_txout_index,
&mut self.position,
] ]
.into_par_iter() .into_par_iter()
} }

View File

@@ -1,4 +1,4 @@
use brk_types::{FeeRate, MempoolEntryInfo, Sats, Txid, TxidPrefix, VSize}; use brk_types::{FeeRate, MempoolEntryInfo, Sats, Timestamp, Txid, TxidPrefix, VSize};
use smallvec::SmallVec; use smallvec::SmallVec;
/// A mempool transaction entry. /// A mempool transaction entry.
@@ -16,6 +16,8 @@ pub struct Entry {
pub ancestor_vsize: VSize, pub ancestor_vsize: VSize,
/// Parent txid prefixes (most txs have 0-2 parents) /// Parent txid prefixes (most txs have 0-2 parents)
pub depends: SmallVec<[TxidPrefix; 2]>, pub depends: SmallVec<[TxidPrefix; 2]>,
/// When this tx was first seen in the mempool
pub first_seen: Timestamp,
} }
impl Entry { impl Entry {
@@ -27,6 +29,7 @@ impl Entry {
ancestor_fee: info.ancestor_fee, ancestor_fee: info.ancestor_fee,
ancestor_vsize: VSize::from(info.ancestor_size), ancestor_vsize: VSize::from(info.ancestor_size),
depends: info.depends.iter().map(TxidPrefix::from).collect(), depends: info.depends.iter().map(TxidPrefix::from).collect(),
first_seen: Timestamp::now(),
} }
} }

View File

@@ -106,6 +106,10 @@ impl MempoolInner {
self.txs.read() self.txs.read()
} }
pub fn get_entries(&self) -> RwLockReadGuard<'_, EntryPool> {
self.entries.read()
}
pub fn get_addrs(&self) -> RwLockReadGuard<'_, AddrTracker> { pub fn get_addrs(&self) -> RwLockReadGuard<'_, AddrTracker> {
self.addrs.read() self.addrs.read()
} }

View File

@@ -1,20 +1,39 @@
use brk_types::{TxWithHex, Txid}; use brk_types::{MempoolRecentTx, TxWithHex, Txid};
use derive_more::Deref; use derive_more::Deref;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
const RECENT_CAP: usize = 10;
/// Store of full transaction data for API access. /// Store of full transaction data for API access.
#[derive(Default, Deref)] #[derive(Default, Deref)]
pub struct TxStore(FxHashMap<Txid, TxWithHex>); pub struct TxStore {
#[deref]
txs: FxHashMap<Txid, TxWithHex>,
recent: Vec<MempoolRecentTx>,
}
impl TxStore { impl TxStore {
/// Check if a transaction exists. /// Check if a transaction exists.
pub fn contains(&self, txid: &Txid) -> bool { pub fn contains(&self, txid: &Txid) -> bool {
self.0.contains_key(txid) self.txs.contains_key(txid)
} }
/// Add transactions in bulk. /// Add transactions in bulk.
pub fn extend(&mut self, txs: FxHashMap<Txid, TxWithHex>) { pub fn extend(&mut self, txs: FxHashMap<Txid, TxWithHex>) {
self.0.extend(txs); let mut new: Vec<_> = txs
.iter()
.take(RECENT_CAP)
.map(|(txid, tx_hex)| MempoolRecentTx::from((txid, tx_hex.tx())))
.collect();
let keep = RECENT_CAP.saturating_sub(new.len());
new.extend(self.recent.drain(..keep.min(self.recent.len())));
self.recent = new;
self.txs.extend(txs);
}
/// Last 10 transactions to enter the mempool.
pub fn recent(&self) -> &[MempoolRecentTx] {
&self.recent
} }
/// Keep items matching predicate, call `on_remove` for each removed item. /// Keep items matching predicate, call `on_remove` for each removed item.
@@ -23,7 +42,7 @@ impl TxStore {
K: FnMut(&Txid) -> bool, K: FnMut(&Txid) -> bool,
R: FnMut(&Txid, &TxWithHex), R: FnMut(&Txid, &TxWithHex),
{ {
self.0.retain(|txid, tx| { self.txs.retain(|txid, tx| {
if keep(txid) { if keep(txid) {
true true
} else { } else {

View File

@@ -1,12 +1,17 @@
use bitcoin::consensus::Decodable;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result}; use brk_error::{Error, Result};
use brk_types::{ use brk_types::{
BlockExtras, BlockHash, BlockHashPrefix, BlockInfo, BlockPool, Height, TxIndex, pools, BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1, BlockPool,
FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
}; };
use vecdb::{AnyVec, ReadableVec, VecIndex}; use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::Query; use crate::Query;
const DEFAULT_BLOCK_COUNT: u32 = 10; const DEFAULT_BLOCK_COUNT: u32 = 10;
const DEFAULT_V1_BLOCK_COUNT: u32 = 15;
const HEADER_SIZE: usize = 80;
impl Query { impl Query {
pub fn block(&self, hash: &BlockHash) -> Result<BlockInfo> { pub fn block(&self, hash: &BlockHash) -> Result<BlockInfo> {
@@ -15,58 +20,70 @@ impl Query {
} }
pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> { pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> {
let indexer = self.indexer();
let max_height = self.max_height(); let max_height = self.max_height();
if height > max_height { if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into())); return Err(Error::OutOfRange("Block height out of range".into()));
} }
self.blocks_range(height.to_usize(), height.to_usize() + 1)?
.pop()
.ok_or(Error::NotFound("Block not found".into()))
}
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?; pub fn block_by_height_v1(&self, height: Height) -> Result<BlockInfoV1> {
let difficulty = indexer.vecs.blocks.difficulty.collect_one(height).unwrap(); let max_height = self.max_height();
let timestamp = indexer.vecs.blocks.timestamp.collect_one(height).unwrap(); if height > max_height {
let size = indexer.vecs.blocks.total.collect_one(height).unwrap(); return Err(Error::OutOfRange("Block height out of range".into()));
let weight = indexer.vecs.blocks.weight.collect_one(height).unwrap(); }
let tx_count = self.tx_count_at_height(height, max_height)?; self.blocks_v1_range(height.to_usize(), height.to_usize() + 1)?
.pop()
.ok_or(Error::NotFound("Block not found".into()))
}
Ok(BlockInfo { pub fn block_header_hex(&self, hash: &BlockHash) -> Result<String> {
id: blockhash, let height = self.height_by_hash(hash)?;
height, let header = self.read_block_header(height)?;
tx_count, Ok(bitcoin::consensus::encode::serialize_hex(&header))
size: *size, }
weight,
timestamp, pub fn block_hash_by_height(&self, height: Height) -> Result<BlockHash> {
difficulty: *difficulty, let max_height = self.max_height();
}) if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
Ok(self.indexer().vecs.blocks.blockhash.read_once(height)?)
} }
pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> { pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
let max_height = self.indexed_height(); let (begin, end) = self.resolve_block_range(start_height, DEFAULT_BLOCK_COUNT);
self.blocks_range(begin, end)
}
let start = start_height.unwrap_or(max_height); pub fn blocks_v1(&self, start_height: Option<Height>) -> Result<Vec<BlockInfoV1>> {
let start = start.min(max_height); let (begin, end) = self.resolve_block_range(start_height, DEFAULT_V1_BLOCK_COUNT);
self.blocks_v1_range(begin, end)
}
let start_u32: u32 = start.into(); // === Range queries (bulk reads) ===
let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1) as usize;
if count == 0 { fn blocks_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfo>> {
if begin >= end {
return Ok(Vec::new()); return Ok(Vec::new());
} }
let indexer = self.indexer(); let indexer = self.indexer();
let computer = self.computer(); let computer = self.computer();
let reader = self.reader();
// Batch-read all PcoVec data for the contiguous range (avoids // Bulk read all indexed data
// per-block page decompression — 4 reads instead of 4*count). let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
let end = start_u32 as usize + 1;
let begin = end - count;
let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end); let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end); let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end); let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end); let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
// Batch-read first_tx_index for tx_count computation (need one extra for next boundary) // Bulk read tx indexes for tx_count
let max_height = self.indexed_height();
let tx_index_end = if end <= max_height.to_usize() { let tx_index_end = if end <= max_height.to_usize() {
end + 1 end + 1
} else { } else {
@@ -79,24 +96,39 @@ impl Query {
.collect_range_at(begin, tx_index_end); .collect_range_at(begin, tx_index_end);
let total_txs = computer.indexes.tx_index.identity.len(); let total_txs = computer.indexes.tx_index.identity.len();
// Bulk read median time window
let median_start = begin.saturating_sub(10);
let median_timestamps: Vec<Timestamp> = indexer
.vecs
.blocks
.timestamp
.collect_range_at(median_start, end);
let count = end - begin;
let mut blocks = Vec::with_capacity(count); let mut blocks = Vec::with_capacity(count);
for i in (0..count).rev() { for i in (0..count).rev() {
let height = Height::from(begin + i); let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?; let header = Self::decode_header(&raw_header)?;
let tx_count = if i + 1 < first_tx_indexes.len() { let tx_count = if i + 1 < first_tx_indexes.len() {
first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize() (first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
} else { } else {
total_txs - first_tx_indexes[i].to_usize() (total_txs - first_tx_indexes[i].to_usize()) as u32
}; };
let median_time =
Self::compute_median_time(&median_timestamps, begin + i, median_start);
blocks.push(BlockInfo { blocks.push(BlockInfo {
id: blockhash, id: blockhashes[i].clone(),
height, height: Height::from(begin + i),
tx_count: tx_count as u32, header,
timestamp: timestamps[i],
tx_count,
size: *sizes[i], size: *sizes[i],
weight: weights[i], weight: weights[i],
timestamp: timestamps[i], median_time,
difficulty: *difficulties[i], difficulty: *difficulties[i],
}); });
} }
@@ -104,13 +136,254 @@ impl Query {
Ok(blocks) Ok(blocks)
} }
pub(crate) fn blocks_v1_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfoV1>> {
if begin >= end {
return Ok(Vec::new());
}
let count = end - begin;
let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader();
let all_pools = pools();
// Bulk read all indexed data
let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
let pool_slugs = computer.pools.pool.collect_range_at(begin, end);
// Bulk read tx indexes
let max_height = self.indexed_height();
let tx_index_end = if end <= max_height.to_usize() {
end + 1
} else {
end
};
let first_tx_indexes: Vec<TxIndex> = indexer
.vecs
.transactions
.first_tx_index
.collect_range_at(begin, tx_index_end);
let total_txs = computer.indexes.tx_index.identity.len();
// Bulk read segwit stats
let segwit_txs = indexer.vecs.blocks.segwit_txs.collect_range_at(begin, end);
let segwit_sizes = indexer.vecs.blocks.segwit_size.collect_range_at(begin, end);
let segwit_weights = indexer
.vecs
.blocks
.segwit_weight
.collect_range_at(begin, end);
// Bulk read extras data
let fee_sats = computer
.mining
.rewards
.fees
.block
.sats
.collect_range_at(begin, end);
let subsidy_sats = computer
.mining
.rewards
.subsidy
.block
.sats
.collect_range_at(begin, end);
let input_counts = computer.inputs.count.sum.collect_range_at(begin, end);
let output_counts = computer
.outputs
.count
.total
.sum
.collect_range_at(begin, end);
let utxo_set_sizes = computer
.outputs
.count
.unspent
.height
.collect_range_at(begin, end);
let input_volumes = computer
.transactions
.volume
.transfer_volume
.block
.sats
.collect_range_at(begin, end);
let output_volumes = computer
.mining
.rewards
.output_volume
.collect_range_at(begin, end);
// Bulk read effective fee rate distribution (accounts for CPFP)
let frd = &computer
.transactions
.fees
.effective_fee_rate
.distribution
.block;
let fr_min = frd.min.height.collect_range_at(begin, end);
let fr_pct10 = frd.pct10.height.collect_range_at(begin, end);
let fr_pct25 = frd.pct25.height.collect_range_at(begin, end);
let fr_median = frd.median.height.collect_range_at(begin, end);
let fr_pct75 = frd.pct75.height.collect_range_at(begin, end);
let fr_pct90 = frd.pct90.height.collect_range_at(begin, end);
let fr_max = frd.max.height.collect_range_at(begin, end);
// Bulk read fee amount distribution (sats)
let fad = &computer.transactions.fees.fee.distribution.block;
let fa_min = fad.min.height.collect_range_at(begin, end);
let fa_pct10 = fad.pct10.height.collect_range_at(begin, end);
let fa_pct25 = fad.pct25.height.collect_range_at(begin, end);
let fa_median = fad.median.height.collect_range_at(begin, end);
let fa_pct75 = fad.pct75.height.collect_range_at(begin, end);
let fa_pct90 = fad.pct90.height.collect_range_at(begin, end);
let fa_max = fad.max.height.collect_range_at(begin, end);
// Bulk read tx positions range covering all coinbase txs (first tx of each block)
let tx_pos_begin = first_tx_indexes[0].to_usize();
let tx_pos_end = first_tx_indexes[count - 1].to_usize() + 1;
let all_tx_positions = indexer
.vecs
.transactions
.position
.collect_range_at(tx_pos_begin, tx_pos_end);
// Bulk read median time window
let median_start = begin.saturating_sub(10);
let median_timestamps = indexer
.vecs
.blocks
.timestamp
.collect_range_at(median_start, end);
let mut blocks = Vec::with_capacity(count);
for i in (0..count).rev() {
let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
let header = Self::decode_header(&raw_header)?;
let tx_count = if i + 1 < first_tx_indexes.len() {
(first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
} else {
(total_txs - first_tx_indexes[i].to_usize()) as u32
};
let weight = weights[i];
let size = *sizes[i];
let total_fees = fee_sats[i];
let subsidy = subsidy_sats[i];
let total_inputs = (*input_counts[i]).saturating_sub(1);
let total_outputs = *output_counts[i];
let vsize = weight.to_vbytes_ceil();
let total_fees_u64 = u64::from(total_fees);
let non_coinbase = tx_count.saturating_sub(1) as u64;
let pool_slug = pool_slugs[i];
let pool = all_pools.get(pool_slug);
let (
coinbase_raw,
coinbase_address,
coinbase_addresses,
coinbase_signature,
coinbase_signature_ascii,
) = Self::parse_coinbase_tx(
reader,
all_tx_positions[first_tx_indexes[i].to_usize() - tx_pos_begin],
);
let median_time =
Self::compute_median_time(&median_timestamps, begin + i, median_start);
let info = BlockInfo {
id: blockhashes[i].clone(),
height: Height::from(begin + i),
header,
timestamp: timestamps[i],
tx_count,
size,
weight,
median_time,
difficulty: *difficulties[i],
};
let total_input_amt = input_volumes[i];
let total_output_amt = output_volumes[i];
let extras = BlockExtras {
total_fees,
median_fee: fr_median[i],
fee_range: [
fr_min[i],
fr_pct10[i],
fr_pct25[i],
fr_median[i],
fr_pct75[i],
fr_pct90[i],
fr_max[i],
],
reward: subsidy + total_fees,
pool: BlockPool {
id: pool.unique_id(),
name: pool.name.to_string(),
slug: pool_slug,
},
avg_fee: Sats::from(if non_coinbase > 0 {
total_fees_u64 / non_coinbase
} else {
0
}),
avg_fee_rate: FeeRate::from((total_fees, VSize::from(vsize))),
coinbase_raw,
coinbase_address,
coinbase_addresses,
coinbase_signature,
coinbase_signature_ascii,
avg_tx_size: if tx_count > 0 {
size as f64 / tx_count as f64
} else {
0.0
},
total_inputs,
total_outputs,
total_output_amt,
median_fee_amt: fa_median[i],
fee_percentiles: [
fa_min[i],
fa_pct10[i],
fa_pct25[i],
fa_median[i],
fa_pct75[i],
fa_pct90[i],
fa_max[i],
],
segwit_total_txs: *segwit_txs[i],
segwit_total_size: *segwit_sizes[i],
segwit_total_weight: segwit_weights[i],
header: raw_header.to_lower_hex_string(),
utxo_set_change: total_outputs as i64 - total_inputs as i64,
utxo_set_size: *utxo_set_sizes[i],
total_input_amt,
virtual_size: vsize as f64,
};
blocks.push(BlockInfoV1 { info, extras });
}
Ok(blocks)
}
// === Helper methods === // === Helper methods ===
pub fn height_by_hash(&self, hash: &BlockHash) -> Result<Height> { pub fn height_by_hash(&self, hash: &BlockHash) -> Result<Height> {
let indexer = self.indexer(); let indexer = self.indexer();
let prefix = BlockHashPrefix::from(hash); let prefix = BlockHashPrefix::from(hash);
indexer indexer
.stores .stores
.blockhash_prefix_to_height .blockhash_prefix_to_height
@@ -119,31 +392,103 @@ impl Query {
.ok_or(Error::NotFound("Block not found".into())) .ok_or(Error::NotFound("Block not found".into()))
} }
pub fn read_block_header(&self, height: Height) -> Result<bitcoin::block::Header> {
let position = self
.indexer()
.vecs
.blocks
.position
.collect_one(height)
.unwrap();
let raw = self.reader().read_raw_bytes(position, HEADER_SIZE)?;
bitcoin::block::Header::consensus_decode(&mut raw.as_slice())
.map_err(|_| Error::Internal("Failed to decode block header"))
}
fn max_height(&self) -> Height { fn max_height(&self) -> Height {
Height::from(self.indexer().vecs.blocks.blockhash.len().saturating_sub(1)) Height::from(self.indexer().vecs.blocks.blockhash.len().saturating_sub(1))
} }
fn tx_count_at_height(&self, height: Height, max_height: Height) -> Result<u32> { fn resolve_block_range(&self, start_height: Option<Height>, count: u32) -> (usize, usize) {
let indexer = self.indexer(); let max_height = self.height();
let computer = self.computer(); let start = start_height.unwrap_or(max_height).min(max_height);
let start_u32: u32 = start.into();
let count = count.min(start_u32 + 1) as usize;
let end = start_u32 as usize + 1;
let begin = end - count;
(begin, end)
}
let first_tx_index = indexer fn decode_header(bytes: &[u8]) -> Result<BlockHeader> {
.vecs let raw = bitcoin::block::Header::consensus_decode(&mut &bytes[..])
.transactions .map_err(|_| Error::Internal("Failed to decode block header"))?;
.first_tx_index Ok(BlockHeader::from(raw))
.collect_one(height) }
.unwrap();
let next_first_tx_index = if height < max_height { fn compute_median_time(
indexer all_timestamps: &[Timestamp],
.vecs height: usize,
.transactions window_start: usize,
.first_tx_index ) -> Timestamp {
.collect_one(height.incremented()) let rel_start = height.saturating_sub(10) - window_start;
.unwrap() let rel_end = height + 1 - window_start;
} else { let mut sorted: Vec<usize> = all_timestamps[rel_start..rel_end]
TxIndex::from(computer.indexes.tx_index.identity.len()) .iter()
.map(|t| usize::from(*t))
.collect();
sorted.sort_unstable();
Timestamp::from(sorted[sorted.len() / 2])
}
fn parse_coinbase_tx(
reader: &brk_reader::Reader,
position: brk_types::BlkPosition,
) -> (String, Option<String>, Vec<String>, String, String) {
let raw_bytes = match reader.read_raw_bytes(position, 1000) {
Ok(bytes) => bytes,
Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
}; };
Ok((next_first_tx_index.to_usize() - first_tx_index.to_usize()) as u32) let tx = match bitcoin::Transaction::consensus_decode(&mut raw_bytes.as_slice()) {
Ok(tx) => tx,
Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
};
let coinbase_raw = tx
.input
.first()
.map(|input| input.script_sig.as_bytes().to_lower_hex_string())
.unwrap_or_default();
let coinbase_signature_ascii = tx
.input
.first()
.map(|input| String::from_utf8_lossy(input.script_sig.as_bytes()).to_string())
.unwrap_or_default();
let coinbase_addresses: Vec<String> = tx
.output
.iter()
.filter_map(|output| {
bitcoin::Address::from_script(&output.script_pubkey, bitcoin::Network::Bitcoin)
.ok()
.map(|a| a.to_string())
})
.collect();
let coinbase_address = coinbase_addresses.first().cloned();
let coinbase_signature = tx
.output
.first()
.map(|output| output.script_pubkey.to_asm_string())
.unwrap_or_default();
(
coinbase_raw,
coinbase_address,
coinbase_addresses,
coinbase_signature,
coinbase_signature_ascii,
)
} }
} }

View File

@@ -12,7 +12,6 @@ impl Query {
fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> { fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> {
let indexer = self.indexer(); let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader(); let reader = self.reader();
let max_height = Height::from(indexer.vecs.blocks.blockhash.len().saturating_sub(1)); let max_height = Height::from(indexer.vecs.blocks.blockhash.len().saturating_sub(1));
@@ -20,7 +19,7 @@ impl Query {
return Err(Error::OutOfRange("Block height out of range".into())); return Err(Error::OutOfRange("Block height out of range".into()));
} }
let position = computer.positions.block.collect_one(height).unwrap(); let position = indexer.vecs.blocks.position.collect_one(height).unwrap();
let size = indexer.vecs.blocks.total.collect_one(height).unwrap(); let size = indexer.vecs.blocks.total.collect_one(height).unwrap();
reader.read_raw_bytes(position, *size as usize) reader.read_raw_bytes(position, *size as usize)

View File

@@ -23,7 +23,7 @@ impl Query {
// === Helper methods === // === Helper methods ===
fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> { pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer(); let indexer = self.indexer();
let max_height = self.indexed_height(); let max_height = self.indexed_height();

View File

@@ -1,5 +1,8 @@
use brk_error::{Error, Result}; use brk_error::{Error, Result};
use brk_types::{MempoolBlock, MempoolInfo, RecommendedFees, Txid}; use brk_types::{
CpfpEntry, CpfpInfo, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees, Txid,
TxidParam, TxidPrefix, Weight,
};
use crate::Query; use crate::Query;
@@ -40,4 +43,67 @@ impl Query {
Ok(blocks) Ok(blocks)
} }
pub fn mempool_recent(&self) -> Result<Vec<MempoolRecentTx>> {
let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
Ok(mempool.get_txs().recent().to_vec())
}
/// Returns CPFP (child-pays-for-parent) information for a mempool
/// transaction: its in-mempool ancestors, its direct descendants, and its
/// effective fee rate.
///
/// # Errors
/// `Error::MempoolNotAvailable` when no mempool tracker is attached;
/// `Error::NotFound` when the txid is not currently in the mempool.
pub fn cpfp(&self, TxidParam { txid }: TxidParam) -> Result<CpfpInfo> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    let prefix = TxidPrefix::from(&txid);
    let entry = entries
        .get(&prefix)
        .ok_or(Error::NotFound("Transaction not in mempool".into()))?;
    // Ancestors: walk up the depends chain. Track visited prefixes so a
    // diamond-shaped dependency graph (two parents sharing a grandparent)
    // doesn't report the same ancestor twice or re-walk its subtree.
    // Ancestor sets are small, so a linear `contains` scan is fine.
    let mut ancestors = Vec::new();
    let mut seen: Vec<TxidPrefix> = Vec::new();
    let mut stack: Vec<TxidPrefix> = entry.depends.to_vec();
    while let Some(p) = stack.pop() {
        if seen.contains(&p) {
            continue;
        }
        if let Some(anc) = entries.get(&p) {
            ancestors.push(CpfpEntry {
                txid: anc.txid.clone(),
                weight: Weight::from(anc.vsize),
                fee: anc.fee,
            });
            stack.extend(anc.depends.iter().cloned());
        }
        seen.push(p);
    }
    // Descendants: any mempool entry listing this tx as a direct parent.
    let mut descendants = Vec::new();
    for e in entries.entries().iter().flatten() {
        if e.depends.contains(&prefix) {
            descendants.push(CpfpEntry {
                txid: e.txid.clone(),
                weight: Weight::from(e.vsize),
                fee: e.fee,
            });
        }
    }
    let effective_fee_per_vsize = entry.effective_fee_rate();
    Ok(CpfpInfo {
        ancestors,
        descendants,
        effective_fee_per_vsize,
    })
}
/// Returns the mempool first-seen timestamp for each txid, in input order;
/// txids not present in the mempool yield 0.
///
/// # Errors
/// `Error::MempoolNotAvailable` when no mempool tracker is attached.
pub fn transaction_times(&self, txids: &[Txid]) -> Result<Vec<u64>> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    let mut times = Vec::with_capacity(txids.len());
    for txid in txids {
        let time = match entries.get(&TxidPrefix::from(txid)) {
            Some(e) => usize::from(e.first_seen) as u64,
            None => 0,
        };
        times.push(time);
    }
    Ok(times)
}
} }

View File

@@ -1,7 +1,7 @@
use brk_error::{Error, Result}; use brk_error::{Error, Result};
use brk_types::{ use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug, BlockInfoV1, Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo,
PoolStats, PoolsSummary, TimePeriod, pools, PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, TimePeriod, pools,
}; };
use vecdb::{AnyVec, ReadableVec, VecIndex}; use vecdb::{AnyVec, ReadableVec, VecIndex};
@@ -177,4 +177,132 @@ impl Query {
reported_hashrate: None, reported_hashrate: None,
}) })
} }
/// Returns up to 10 blocks mined by `slug`, scanning backwards from
/// `start_height` (or the chain tip when `None`), newest first.
pub fn pool_blocks(
    &self,
    slug: PoolSlug,
    start_height: Option<Height>,
) -> Result<Vec<BlockInfoV1>> {
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let start = start_height.map(|h| h.to_usize()).unwrap_or(max_height);
    // BytesVec reader gives O(1) mmap reads — efficient for backward scan
    let reader = computer.pools.pool.reader();
    // Clamp to the last index actually present in the pool-attribution vec
    // (it may lag behind the chain tip).
    let end = start.min(reader.len().saturating_sub(1));
    let mut heights = Vec::with_capacity(10);
    // Backward scan: stop after the 10 most recent matches.
    for h in (0..=end).rev() {
        if reader.get(h) == slug {
            heights.push(h);
            if heights.len() >= 10 {
                break;
            }
        }
    }
    let mut blocks = Vec::with_capacity(heights.len());
    for h in heights {
        // Reuse the bulk path for a single-block range; silently skip
        // heights whose assembly fails (best-effort listing).
        if let Ok(mut v) = self.blocks_v1_range(h, h + 1) {
            blocks.append(&mut v);
        }
    }
    Ok(blocks)
}
/// Returns the full hashrate history for a single pool, annotated with the
/// pool's display name.
pub fn pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
    let pools_list = pools();
    let pool = pools_list.get(slug);
    // Start at height 0 => entire history.
    let raw_entries = self.compute_pool_hashrate_entries(slug, 0)?;
    let mut result = Vec::with_capacity(raw_entries.len());
    for (timestamp, avg_hashrate, share) in raw_entries {
        result.push(PoolHashrateEntry {
            timestamp,
            avg_hashrate,
            share,
            pool_name: pool.name.to_string(),
        });
    }
    Ok(result)
}
/// Returns hashrate entries for every known pool over `time_period`
/// (entire history when `None`), dropping zero-share samples. Pools whose
/// computation fails are skipped (best-effort aggregation).
pub fn pools_hashrate(
    &self,
    time_period: Option<TimePeriod>,
) -> Result<Vec<PoolHashrateEntry>> {
    let current_height = self.height().to_usize();
    let start = time_period
        .map(|tp| current_height.saturating_sub(tp.block_count()))
        .unwrap_or(0);
    let pools_list = pools();
    let mut entries = Vec::new();
    for pool in pools_list.iter() {
        let Ok(pool_entries) = self.compute_pool_hashrate_entries(pool.slug, start) else {
            continue;
        };
        entries.extend(
            pool_entries
                .into_iter()
                .filter(|&(_, _, share)| share > 0.0)
                .map(|(timestamp, avg_hashrate, share)| PoolHashrateEntry {
                    timestamp,
                    avg_hashrate,
                    share,
                    pool_name: pool.name.to_string(),
                }),
        );
    }
    Ok(entries)
}
/// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
///
/// Samples roughly 200 points across the range: `share` is the pool's
/// dominance (basis points / 10000) at that height, and hashrate is the
/// network hashrate for that day scaled by the share.
fn compute_pool_hashrate_entries(
    &self,
    slug: PoolSlug,
    start_height: usize,
) -> Result<Vec<(brk_types::Timestamp, u128, f64)>> {
    let computer = self.computer();
    let indexer = self.indexer();
    // End is exclusive: one past the current tip.
    let end = self.height().to_usize() + 1;
    let start = start_height;
    // Dominance series lives under either `major` or `minor` depending on
    // how the pool is classified; try both before giving up.
    let dominance_bps = computer
        .pools
        .major
        .get(&slug)
        .map(|v| &v.base.dominance.bps.height)
        .or_else(|| {
            computer
                .pools
                .minor
                .get(&slug)
                .map(|v| &v.dominance.bps.height)
        })
        .ok_or_else(|| Error::NotFound("Pool not found".into()))?;
    let total = end - start;
    // Downsample to ~200 points regardless of range size.
    let step = (total / 200).max(1);
    // Batch read everything for the range
    let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(start, end);
    let bps_values = dominance_bps.collect_range_at(start, end);
    let day1_values = computer.indexes.height.day1.collect_range_at(start, end);
    let hashrate_vec = &computer.mining.hashrate.rate.base.day1;
    // Pre-read all needed hashrates by collecting unique day1 values
    let max_day = day1_values.iter().map(|d| d.to_usize()).max().unwrap_or(0);
    let min_day = day1_values.iter().map(|d| d.to_usize()).min().unwrap_or(0);
    let hashrates = hashrate_vec.collect_range_dyn(min_day, max_day + 1);
    Ok((0..total)
        .step_by(step)
        .filter_map(|i| {
            let bps = *bps_values[i];
            // Basis points -> fraction.
            let share = bps as f64 / 10000.0;
            // `hashrates` was read starting at `min_day`, so rebase the
            // day index before looking it up. Missing days are skipped
            // via the double `?` (out-of-range slot, then empty slot).
            let day_idx = day1_values[i].to_usize() - min_day;
            let network_hr = f64::from(*hashrates.get(day_idx)?.as_ref()?);
            Some((timestamps[i], (network_hr * share) as u128, share))
        })
        .collect())
}
} }

View File

@@ -1,5 +1,6 @@
use brk_error::Result; use brk_error::Result;
use brk_types::Dollars; use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Timestamp};
use vecdb::{ReadableVec, VecIndex};
use crate::Query; use crate::Query;
@@ -18,4 +19,40 @@ impl Query {
Ok(oracle.price_dollars()) Ok(oracle.price_dollars())
} }
/// Returns historical spot prices: a single entry for the block closest to
/// `timestamp` when given, otherwise ~200 samples spanning the whole chain.
pub fn historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
    let indexer = self.indexer();
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let end = max_height + 1;
    // Full-chain reads; both vectors are indexed by height.
    let timestamps = indexer.vecs.blocks.timestamp.collect();
    let all_prices = computer.prices.spot.cents.height.collect();
    let prices = if let Some(target_ts) = timestamp {
        let target = usize::from(target_ts);
        // NOTE(review): binary search assumes `timestamps` is sorted, but
        // block timestamps are not strictly monotonic — the result for a
        // non-exact hit is only approximate. TODO confirm this is acceptable.
        // On a miss, `i` is the insertion point, clamped to the tip.
        let h = timestamps
            .binary_search_by_key(&target, |t| usize::from(*t))
            .unwrap_or_else(|i| i.min(max_height));
        vec![HistoricalPriceEntry {
            time: usize::from(timestamps[h]) as u64,
            usd: Dollars::from(all_prices[h]),
        }]
    } else {
        // Downsample to ~200 evenly spaced heights.
        let step = (max_height / 200).max(1);
        (0..end)
            .step_by(step)
            .map(|h| HistoricalPriceEntry {
                time: usize::from(timestamps[h]) as u64,
                usd: Dollars::from(all_prices[h]),
            })
            .collect()
    };
    Ok(HistoricalPrice {
        prices,
        // No exchange-rate source wired up yet; empty placeholder.
        exchange_rates: ExchangeRates {},
    })
}
} }

View File

@@ -3,8 +3,8 @@ use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex}; use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result}; use brk_error::{Error, Result};
use brk_types::{ use brk_types::{
OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxOutspend, TxStatus, Txid, Height, MerkleProof, OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut,
TxidParam, TxidPrefix, Vin, Vout, Weight, TxOutspend, TxStatus, Txid, TxidParam, TxidPrefix, Vin, Vout, Weight,
}; };
use vecdb::{ReadableVec, VecIndex}; use vecdb::{ReadableVec, VecIndex};
@@ -72,6 +72,20 @@ impl Query {
}) })
} }
pub fn transaction_raw(&self, TxidParam { txid }: TxidParam) -> Result<Vec<u8>> {
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_raw_by_index(tx_index)
}
pub fn transaction_hex(&self, TxidParam { txid }: TxidParam) -> Result<String> { pub fn transaction_hex(&self, TxidParam { txid }: TxidParam) -> Result<String> {
// First check mempool for unconfirmed transactions // First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool() if let Some(mempool) = self.mempool()
@@ -192,7 +206,6 @@ impl Query {
pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> { pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
let indexer = self.indexer(); let indexer = self.indexer();
let reader = self.reader(); let reader = self.reader();
let computer = self.computer();
// Get tx metadata using collect_one for PcoVec, read_once for BytesVec // Get tx metadata using collect_one for PcoVec, read_once for BytesVec
let txid = indexer.vecs.transactions.txid.read_once(tx_index)?; let txid = indexer.vecs.transactions.txid.read_once(tx_index)?;
@@ -226,7 +239,12 @@ impl Query {
.first_txin_index .first_txin_index
.collect_one(tx_index) .collect_one(tx_index)
.unwrap(); .unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap(); let position = indexer
.vecs
.transactions
.position
.collect_one(tx_index)
.unwrap();
// Get block info for status // Get block info for status
let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?; let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?;
@@ -337,22 +355,15 @@ impl Query {
Ok(transaction) Ok(transaction)
} }
fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> { fn transaction_raw_by_index(&self, tx_index: TxIndex) -> Result<Vec<u8>> {
let indexer = self.indexer(); let indexer = self.indexer();
let reader = self.reader(); let total_size = indexer.vecs.transactions.total_size.collect_one(tx_index).unwrap();
let computer = self.computer(); let position = indexer.vecs.transactions.position.collect_one(tx_index).unwrap();
self.reader().read_raw_bytes(position, *total_size as usize)
}
let total_size = indexer fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> {
.vecs Ok(self.transaction_raw_by_index(tx_index)?.to_lower_hex_string())
.transactions
.total_size
.collect_one(tx_index)
.unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap();
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
Ok(buffer.to_lower_hex_string())
} }
fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> { fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
@@ -407,4 +418,93 @@ impl Query {
}), }),
}) })
} }
fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
let indexer = self.indexer();
let prefix = TxidPrefix::from(txid);
let tx_index: TxIndex = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)?
.map(|cow| cow.into_owned())
.ok_or(Error::UnknownTxid)?;
let height: Height = indexer
.vecs
.transactions
.height
.collect_one(tx_index)
.unwrap();
Ok((tx_index, height))
}
/// Broadcasts a hex-encoded raw transaction through the RPC client and
/// returns the resulting txid.
pub fn broadcast_transaction(&self, hex: &str) -> Result<Txid> {
    self.client().send_raw_transaction(hex)
}
/// Builds a consensus-serialized `merkleblock` (hex) proving the given
/// transaction's inclusion in its confirming block.
pub fn merkleblock_proof(&self, txid_param: TxidParam) -> Result<String> {
    let (_, height) = self.resolve_tx(&txid_param.txid)?;
    let wanted: bitcoin::Txid = (&txid_param.txid).into();
    let header = self.read_block_header(height)?;
    // Convert the block's txids into the bitcoin crate's representation.
    let block_txids: Vec<bitcoin::Txid> = self
        .block_txids_by_height(height)?
        .iter()
        .map(bitcoin::Txid::from)
        .collect();
    let merkle_block = bitcoin::MerkleBlock::from_header_txids_with_predicate(
        &header,
        &block_txids,
        |candidate| *candidate == wanted,
    );
    Ok(bitcoin::consensus::encode::serialize_hex(&merkle_block))
}
/// Returns an Electrum-style merkle proof for a confirmed transaction:
/// the sibling-hash path plus the tx's position within its block.
pub fn merkle_proof(&self, txid_param: TxidParam) -> Result<MerkleProof> {
    let (tx_index, height) = self.resolve_tx(&txid_param.txid)?;
    let first_tx = self
        .indexer()
        .vecs
        .transactions
        .first_tx_index
        .collect_one(height)
        .ok_or(Error::NotFound("Block not found".into()))?;
    // Position within the block = global index minus the block's first index.
    let pos = tx_index.to_usize() - first_tx.to_usize();
    let txids = self.block_txids_by_height(height)?;
    Ok(MerkleProof {
        block_height: height,
        merkle: merkle_path(&txids, pos),
        pos,
    })
}
}
/// Computes the merkle sibling path (as display-order hex strings) for the
/// leaf at `pos`, folding the tree level by level with double-SHA256.
fn merkle_path(txids: &[Txid], pos: usize) -> Vec<String> {
    use bitcoin::hashes::{Hash, sha256d};
    // Txid bytes are in internal order (same layout as bitcoin::Txid)
    let mut hashes: Vec<[u8; 32]> = txids
        .iter()
        .map(|t| bitcoin::Txid::from(t).to_byte_array())
        .collect();
    let mut proof = Vec::new();
    let mut idx = pos;
    while hashes.len() > 1 {
        // Sibling is the XOR-1 neighbour; when the node is the unpaired
        // last element of an odd level, it is paired with itself.
        let sibling = if idx ^ 1 < hashes.len() { idx ^ 1 } else { idx };
        // Display order: reverse bytes for hex output
        let mut display = hashes[sibling];
        display.reverse();
        proof.push(bitcoin::hex::DisplayHex::to_lower_hex_string(&display));
        // Fold one level: hash each pair; `pair.last()` duplicates the
        // lone element of an odd-length level (pair[0] == last).
        hashes = hashes
            .chunks(2)
            .map(|pair| {
                let right = pair.last().unwrap();
                let mut combined = [0u8; 64];
                combined[..32].copy_from_slice(&pair[0]);
                combined[32..].copy_from_slice(right);
                sha256d::Hash::hash(&combined).to_byte_array()
            })
            .collect();
        // Move up to the parent index.
        idx /= 2;
    }
    proof
}
} }

View File

@@ -21,7 +21,7 @@ fn main() -> Result<()> {
if let Some(block) = reader.read(Some(height), Some(height)).iter().next() { if let Some(block) = reader.read(Some(height), Some(height)).iter().next() {
println!( println!(
"height={} hash={} txs={} coinbase=\"{}\" ({:?})", "height={} hash={} txs={} coinbase=\"{:?}\" ({:?})",
block.height(), block.height(),
block.hash(), block.hash(),
block.txdata.len(), block.txdata.len(),

View File

@@ -14,7 +14,7 @@ use bitcoin::{block::Header, consensus::Decodable};
use blk_index_to_blk_path::*; use blk_index_to_blk_path::*;
use brk_error::{Error, Result}; use brk_error::{Error, Result};
use brk_rpc::Client; use brk_rpc::Client;
use brk_types::{BlkMetadata, BlkPosition, BlockHash, Height, ReadBlock}; use brk_types::{BlkPosition, BlockHash, Height, ReadBlock};
pub use crossbeam::channel::Receiver; pub use crossbeam::channel::Receiver;
use crossbeam::channel::bounded; use crossbeam::channel::bounded;
use derive_more::Deref; use derive_more::Deref;
@@ -24,28 +24,17 @@ use tracing::{error, warn};
mod blk_index_to_blk_path; mod blk_index_to_blk_path;
mod decode; mod decode;
mod scan;
mod xor_bytes; mod xor_bytes;
mod xor_index; mod xor_index;
use decode::*; use decode::*;
use scan::*;
pub use xor_bytes::*; pub use xor_bytes::*;
pub use xor_index::*; pub use xor_index::*;
const MAGIC_BYTES: [u8; 4] = [249, 190, 180, 217];
const BOUND_CAP: usize = 50; const BOUND_CAP: usize = 50;
fn find_magic(bytes: &[u8], xor_i: &mut XORIndex, xor_bytes: XORBytes) -> Option<usize> {
let mut window = [0u8; 4];
for (i, &b) in bytes.iter().enumerate() {
window.rotate_left(1);
window[3] = xor_i.byte(b, xor_bytes);
if window == MAGIC_BYTES {
return Some(i + 1);
}
}
None
}
/// ///
/// Bitcoin BLK file reader /// Bitcoin BLK file reader
/// ///
@@ -117,10 +106,46 @@ impl ReaderInner {
Ok(buffer) Ok(buffer)
} }
/// Returns a receiver streaming `ReadBlock`s from `hash + 1` to the chain tip.
/// If `hash` is `None`, starts from genesis.
pub fn after(&self, hash: Option<BlockHash>) -> Result<Receiver<ReadBlock>> {
    // Resolve the starting height: the block after `hash`, or genesis.
    let start = if let Some(hash) = hash.as_ref() {
        let info = self.client.get_block_header_info(hash)?;
        Height::from(info.height + 1)
    } else {
        Height::ZERO
    };
    let end = self.client.get_last_height()?;
    if end < start {
        // Already caught up: return the receiver of a dropped zero-capacity
        // channel, which yields nothing.
        return Ok(bounded(0).1);
    }
    if *end - *start < 10 {
        // Small catch-up window: the backward tail scan (`read_rev`) avoids
        // scanning blk files from the front; collect and re-reverse the
        // blocks to restore chain order before handing them out.
        let mut blocks: Vec<_> = self.read_rev(Some(start), Some(end)).iter().collect();
        blocks.reverse();
        // Capacity equals the block count, so these sends never block.
        let (send, recv) = bounded(blocks.len());
        for block in blocks {
            let _ = send.send(block);
        }
        return Ok(recv);
    }
    // Larger ranges use the streaming forward reader.
    Ok(self.read(Some(start), Some(end)))
}
/// Returns a crossbeam channel receiver that streams `ReadBlock`s in chain order. /// Returns a crossbeam channel receiver that streams `ReadBlock`s in chain order.
/// ///
/// Both `start` and `end` are inclusive. `None` means unbounded. /// Both `start` and `end` are inclusive. `None` means unbounded.
pub fn read(&self, start: Option<Height>, end: Option<Height>) -> Receiver<ReadBlock> { pub fn read(&self, start: Option<Height>, end: Option<Height>) -> Receiver<ReadBlock> {
if let (Some(s), Some(e)) = (start, end)
&& s > e
{
let (_, recv) = bounded(0);
return recv;
}
let client = self.client.clone(); let client = self.client.clone();
let (send_bytes, recv_bytes) = bounded(BOUND_CAP / 2); let (send_bytes, recv_bytes) = bounded(BOUND_CAP / 2);
@@ -151,53 +176,25 @@ impl ReaderInner {
thread::spawn(move || { thread::spawn(move || {
let _ = blk_index_to_blk_path.range(first_blk_index..).try_for_each( let _ = blk_index_to_blk_path.range(first_blk_index..).try_for_each(
move |(blk_index, blk_path)| { move |(blk_index, blk_path)| {
let mut xor_i = XORIndex::default(); let Ok(mut bytes) = fs::read(blk_path) else {
let blk_index = *blk_index;
let Ok(mut blk_bytes_) = fs::read(blk_path) else {
error!("Failed to read blk file: {}", blk_path.display()); error!("Failed to read blk file: {}", blk_path.display());
return ControlFlow::Break(()); return ControlFlow::Break(());
}; };
let blk_bytes = blk_bytes_.as_mut_slice(); let result = scan_bytes(
let mut i = 0; &mut bytes,
*blk_index,
loop { 0,
let Some(offset) = find_magic(&blk_bytes[i..], &mut xor_i, xor_bytes) xor_bytes,
else { |metadata, block_bytes, xor_i| {
break; if send_bytes.send((metadata, block_bytes, xor_i)).is_err() {
}; return ControlFlow::Break(());
i += offset; }
ControlFlow::Continue(())
if i + 4 > blk_bytes.len() { },
warn!("Truncated blk file {blk_index}: not enough bytes for block length at offset {i}"); );
break; if result.interrupted {
} return ControlFlow::Break(());
let len = u32::from_le_bytes(
xor_i
.bytes(&mut blk_bytes[i..(i + 4)], xor_bytes)
.try_into()
.unwrap(),
) as usize;
i += 4;
if i + len > blk_bytes.len() {
warn!("Truncated blk file {blk_index}: block at offset {} claims {len} bytes but only {} remain", i - 4, blk_bytes.len() - i);
break;
}
let position = BlkPosition::new(blk_index, i as u32);
let metadata = BlkMetadata::new(position, len as u32);
let block_bytes = (blk_bytes[i..(i + len)]).to_vec();
if send_bytes.send((metadata, block_bytes, xor_i)).is_err() {
return ControlFlow::Break(());
}
i += len;
xor_i.add_assign(len);
} }
ControlFlow::Continue(()) ControlFlow::Continue(())
}, },
); );
@@ -288,6 +285,83 @@ impl ReaderInner {
recv_ordered recv_ordered
} }
/// Streams `ReadBlock`s in reverse order (newest first) by scanning
/// `.blk` files from the tail. Efficient for reading recent blocks.
/// Both `start` and `end` are inclusive. `None` means unbounded.
pub fn read_rev(&self, start: Option<Height>, end: Option<Height>) -> Receiver<ReadBlock> {
    // Size of each backward read window per blk file.
    const CHUNK: usize = 5 * 1024 * 1024;
    if let (Some(s), Some(e)) = (start, end)
        && s > e
    {
        // Empty range: receiver of a dropped channel yields nothing.
        return bounded(0).1;
    }
    let client = self.client.clone();
    let xor_bytes = self.xor_bytes;
    // Refresh and cache the blk-index -> path map before scanning.
    let paths = BlkIndexToBlkPath::scan(&self.blocks_dir);
    *self.blk_index_to_blk_path.write() = paths.clone();
    let (send, recv) = bounded(BOUND_CAP);
    thread::spawn(move || {
        // `head` carries the bytes that precede the first complete block of
        // the current chunk; they belong to a block that straddles the
        // chunk boundary and are re-appended to the next (earlier) chunk.
        let mut head = Vec::new();
        // Newest blk files first.
        for (&blk_index, path) in paths.iter().rev() {
            let file_len = fs::metadata(path).map(|m| m.len() as usize).unwrap_or(0);
            if file_len == 0 {
                continue;
            }
            let Ok(mut file) = File::open(path) else {
                return;
            };
            // Walk the file backwards, one CHUNK window at a time.
            let mut read_end = file_len;
            while read_end > 0 {
                let read_start = read_end.saturating_sub(CHUNK);
                let chunk_len = read_end - read_start;
                read_end = read_start;
                let _ = file.seek(SeekFrom::Start(read_start as u64));
                // Buffer layout: [chunk bytes][carried head bytes].
                let mut buf = vec![0u8; chunk_len + head.len()];
                if file.read_exact(&mut buf[..chunk_len]).is_err() {
                    return;
                }
                buf[chunk_len..].copy_from_slice(&head);
                head.clear();
                // Forward-scan the window, decoding blocks in file order…
                let mut blocks = Vec::new();
                let result = scan_bytes(
                    &mut buf,
                    blk_index,
                    read_start,
                    xor_bytes,
                    |metadata, bytes, xor_i| {
                        if let Ok(Some(block)) = decode_block(
                            bytes, metadata, &client, xor_i, xor_bytes, start, end, 0, 0,
                        ) {
                            blocks.push(block);
                        }
                        ControlFlow::Continue(())
                    },
                );
                // …then emit them newest-first.
                for block in blocks.into_iter().rev() {
                    // Once we've emitted the block at `start`, we're done.
                    let done = start.is_some_and(|s| block.height() <= s);
                    if send.send(block).is_err() || done {
                        return;
                    }
                }
                if read_start > 0 {
                    // Everything before the first magic marker belongs to a
                    // block split across the boundary; carry it backwards.
                    head = buf[..result.first_magic.unwrap_or(buf.len())].to_vec();
                }
            }
        }
    });
    recv
}
fn find_start_blk_index( fn find_start_blk_index(
&self, &self,
target_start: Option<Height>, target_start: Option<Height>,
@@ -298,18 +372,6 @@ impl ReaderInner {
return Ok(0); return Ok(0);
}; };
// If start is a very recent block we only look back X blk file before the last
if let Ok(height) = self.client.get_last_height()
&& (*height).saturating_sub(*target_start) <= 3
{
return Ok(blk_index_to_blk_path
.keys()
.rev()
.nth(2)
.copied()
.unwrap_or_default());
}
let blk_indices: Vec<u16> = blk_index_to_blk_path.keys().copied().collect(); let blk_indices: Vec<u16> = blk_index_to_blk_path.keys().copied().collect();
if blk_indices.is_empty() { if blk_indices.is_empty() {

View File

@@ -0,0 +1,73 @@
use std::ops::ControlFlow;
use brk_types::{BlkMetadata, BlkPosition};
use crate::{XORBytes, XORIndex};
const MAGIC_BYTES: [u8; 4] = [249, 190, 180, 217];
/// Searches `bytes` for the network magic marker, XOR-decoding each byte
/// through `xor_i` as it goes (so `xor_i` stays aligned with the bytes
/// consumed). Returns the index just *past* the last magic byte, or `None`
/// if no marker is found.
pub fn find_magic(bytes: &[u8], xor_i: &mut XORIndex, xor_bytes: XORBytes) -> Option<usize> {
    // 4-byte sliding window over the decoded stream.
    let mut window = [0u8; 4];
    for (i, &b) in bytes.iter().enumerate() {
        window.rotate_left(1);
        window[3] = xor_i.byte(b, xor_bytes);
        if window == MAGIC_BYTES {
            return Some(i + 1);
        }
    }
    None
}
/// Outcome of a `scan_bytes` pass over a buffer.
pub struct ScanResult {
    // Buffer offset of the start of the first magic marker found, or `None`
    // when no marker was seen; callers use it to carry partial-block bytes
    // across chunk boundaries.
    pub first_magic: Option<usize>,
    // True when the `on_block` callback returned `ControlFlow::Break`.
    pub interrupted: bool,
}
/// Scans `buf` for blocks. `file_offset` is the absolute position of `buf[0]` in the file.
/// Calls `on_block` for each complete block found.
///
/// Each block on disk is laid out as `[magic][u32 length][payload]`; the
/// buffer is XOR-encoded, so the rolling XOR index is seeded from
/// `file_offset` to stay aligned with the file's key stream. Truncated
/// trailing blocks are silently skipped (the loop just breaks).
pub fn scan_bytes(
    buf: &mut [u8],
    blk_index: u16,
    file_offset: usize,
    xor_bytes: XORBytes,
    mut on_block: impl FnMut(BlkMetadata, Vec<u8>, XORIndex) -> ControlFlow<()>,
) -> ScanResult {
    let mut xor_i = XORIndex::default();
    // Align the XOR key stream with the buffer's absolute file position.
    xor_i.add_assign(file_offset);
    let mut first_magic = None;
    let mut i = 0;
    while let Some(off) = find_magic(&buf[i..], &mut xor_i, xor_bytes) {
        let before = i;
        // `off` points just past the 4-byte magic; `i` is now the length field.
        i += off;
        // Record where the first magic marker *starts* (4 bytes back).
        first_magic.get_or_insert(before + off.saturating_sub(4));
        if i + 4 > buf.len() {
            // Truncated: not enough bytes for the length field.
            break;
        }
        let len = u32::from_le_bytes(
            xor_i
                .bytes(&mut buf[i..i + 4], xor_bytes)
                .try_into()
                .unwrap(),
        ) as usize;
        i += 4;
        if i + len > buf.len() {
            // Truncated: payload extends past the buffer.
            break;
        }
        // Position is absolute within the blk file, not the buffer.
        let position = BlkPosition::new(blk_index, (file_offset + i) as u32);
        let metadata = BlkMetadata::new(position, len as u32);
        if on_block(metadata, buf[i..i + len].to_vec(), xor_i).is_break() {
            return ScanResult {
                first_magic,
                interrupted: true,
            };
        }
        i += len;
        // Advance the key stream past the payload we skipped over.
        xor_i.add_assign(len);
    }
    ScanResult {
        first_magic,
        interrupted: false,
    }
}

View File

@@ -240,4 +240,8 @@ impl ClientInner {
) -> Result<String> { ) -> Result<String> {
Ok(self.call_with_retry(|c| c.get_raw_transaction_hex(txid, block_hash))?) Ok(self.call_with_retry(|c| c.get_raw_transaction_hex(txid, block_hash))?)
} }
pub fn send_raw_transaction(&self, hex: &str) -> Result<bitcoin::Txid> {
Ok(self.call_once(|c| c.send_raw_transaction(hex))?)
}
} }

View File

@@ -294,6 +294,14 @@ impl ClientInner {
})?; })?;
Ok(r) Ok(r)
} }
pub fn send_raw_transaction(&self, hex: &str) -> Result<bitcoin::Txid> {
let hex = hex.to_string();
Ok(self.call_with_retry(|c| {
let args = [serde_json::Value::String(hex.clone())];
c.call("sendrawtransaction", &args)
})?)
}
} }
// Local deserialization structs for raw RPC responses // Local deserialization structs for raw RPC responses

View File

@@ -232,6 +232,10 @@ impl Client {
.get_raw_transaction_hex(txid.into(), block_hash.map(|h| h.into())) .get_raw_transaction_hex(txid.into(), block_hash.map(|h| h.into()))
} }
pub fn send_raw_transaction(&self, hex: &str) -> Result<Txid> {
self.0.send_raw_transaction(hex).map(Txid::from)
}
/// Checks if a block is in the main chain (has positive confirmations) /// Checks if a block is in the main chain (has positive confirmations)
pub fn is_in_main_chain(&self, hash: &BlockHash) -> Result<bool> { pub fn is_in_main_chain(&self, hash: &BlockHash) -> Result<bool> {
let block_info = self.get_block_info(hash)?; let block_info = self.get_block_info(hash)?;

View File

@@ -5,8 +5,8 @@ use axum::{
}; };
use brk_query::BLOCK_TXS_PAGE_SIZE; use brk_query::BLOCK_TXS_PAGE_SIZE;
use brk_types::{ use brk_types::{
BlockHashParam, BlockHashStartIndex, BlockHashTxIndex, BlockInfo, BlockStatus, BlockTimestamp, BlockHash, BlockHashParam, BlockHashStartIndex, BlockHashTxIndex, BlockInfo, BlockInfoV1,
HeightParam, TimestampParam, Transaction, Txid, BlockStatus, BlockTimestamp, Height, HeightParam, Hex, TimestampParam, Transaction, Txid,
}; };
use crate::{CacheStrategy, extended::TransformResponseExtended}; use crate::{CacheStrategy, extended::TransformResponseExtended};
@@ -61,6 +61,46 @@ impl BlockRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/v1/blocks",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(None))
.await
},
|op| {
op.id("get_blocks_v1")
.blocks_tag()
.summary("Recent blocks with extras")
.description("Retrieve the last 10 blocks with extended data including pool identification and fee statistics.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/v1/blocks/{height}",
get_with(
async |uri: Uri,
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(Some(path.height))).await
},
|op| {
op.id("get_blocks_v1_from_height")
.blocks_tag()
.summary("Blocks from height with extras")
.description("Retrieve up to 10 blocks with extended data going backwards from the given height.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.bad_request()
.server_error()
},
),
)
.api_route( .api_route(
"/api/block-height/{height}", "/api/block-height/{height}",
get_with( get_with(
@@ -68,16 +108,16 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap, headers: HeaderMap,
Path(path): Path<HeightParam>, Path(path): Path<HeightParam>,
State(state): State<AppState>| { State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_by_height(path.height)).await state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_hash_by_height(path.height).map(|h| h.to_string())).await
}, },
|op| { |op| {
op.id("get_block_by_height") op.id("get_block_by_height")
.blocks_tag() .blocks_tag()
.summary("Block by height") .summary("Block hash by height")
.description( .description(
"Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*", "Retrieve the block hash at a given height. Returns the hash as plain text.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*",
) )
.ok_response::<BlockInfo>() .ok_response::<BlockHash>()
.not_modified() .not_modified()
.bad_request() .bad_request()
.not_found() .not_found()
@@ -230,6 +270,79 @@ impl BlockRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/blocks/tip/height",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| Ok(q.height().to_string())).await
},
|op| {
op.id("get_block_tip_height")
.blocks_tag()
.summary("Block tip height")
.description("Returns the height of the last block.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*")
.ok_response::<Height>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/blocks/tip/hash",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| q.block_hash_by_height(q.height()).map(|h| h.to_string())).await
},
|op| {
op.id("get_block_tip_hash")
.blocks_tag()
.summary("Block tip hash")
.description("Returns the hash of the last block.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*")
.ok_response::<BlockHash>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/block/{hash}/header",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_header_hex(&path.hash)).await
},
|op| {
op.id("get_block_header")
.blocks_tag()
.summary("Block header")
.description("Returns the hex-encoded block header.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*")
.ok_response::<Hex>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/block/{hash}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
let height = q.height_by_hash(&path.hash)?;
q.block_by_height_v1(height)
}).await
},
|op| {
op.id("get_block_v1")
.blocks_tag()
.summary("Block (v1)")
.description("Returns block details with extras by hash.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*")
.ok_response::<BlockInfoV1>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route( .api_route(
"/api/v1/mining/blocks/timestamp/{timestamp}", "/api/v1/mining/blocks/timestamp/{timestamp}",
get_with( get_with(

View File

@@ -1,13 +1,14 @@
use aide::axum::{ApiRouter, routing::get_with}; use aide::axum::{ApiRouter, routing::get_with};
use axum::{ use axum::{
extract::State, extract::{Query, State},
http::{HeaderMap, Uri}, http::{HeaderMap, Uri},
response::Redirect,
routing::get,
}; };
use brk_types::{Dollars, MempoolBlock, MempoolInfo, RecommendedFees, Txid}; use brk_types::{
Dollars, HistoricalPrice, MempoolBlock, MempoolInfo, MempoolRecentTx, OptionalTimestampParam,
RecommendedFees, Txid,
};
use crate::extended::TransformResponseExtended; use crate::{CacheStrategy, extended::TransformResponseExtended};
use super::AppState; use super::AppState;
@@ -18,9 +19,8 @@ pub trait MempoolRoutes {
impl MempoolRoutes for ApiRouter<AppState> { impl MempoolRoutes for ApiRouter<AppState> {
fn add_mempool_routes(self) -> Self { fn add_mempool_routes(self) -> Self {
self self
.route("/api/mempool", get(Redirect::temporary("/api#tag/mempool")))
.api_route( .api_route(
"/api/mempool/info", "/api/mempool",
get_with( get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| { async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.mempool_info()).await state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.mempool_info()).await
@@ -51,6 +51,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/mempool/recent",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.mempool_recent()).await
},
|op| {
op.id("get_mempool_recent")
.mempool_tag()
.summary("Recent mempool transactions")
.description("Get the last 10 transactions to enter the mempool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*")
.ok_response::<Vec<MempoolRecentTx>>()
.server_error()
},
),
)
.api_route( .api_route(
"/api/mempool/price", "/api/mempool/price",
get_with( get_with(
@@ -87,6 +103,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/v1/fees/precise",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.recommended_fees()).await
},
|op| {
op.id("get_precise_fees")
.mempool_tag()
.summary("Precise recommended fees")
.description("Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*")
.ok_response::<RecommendedFees>()
.server_error()
},
),
)
.api_route( .api_route(
"/api/v1/fees/mempool-blocks", "/api/v1/fees/mempool-blocks",
get_with( get_with(
@@ -103,5 +135,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/v1/historical-price",
get_with(
async |uri: Uri, headers: HeaderMap, Query(params): Query<OptionalTimestampParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.historical_price(params.timestamp)).await
},
|op| {
op.id("get_historical_price")
.mempool_tag()
.summary("Historical price")
.description("Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*")
.ok_response::<HistoricalPrice>()
.not_modified()
.server_error()
},
),
)
} }
} }

View File

@@ -6,8 +6,9 @@ use axum::{
routing::get, routing::get,
}; };
use brk_types::{ use brk_types::{
BlockCountParam, BlockFeesEntry, BlockRewardsEntry, BlockSizesWeights, DifficultyAdjustment, BlockCountParam, BlockFeesEntry, BlockInfoV1, BlockRewardsEntry, BlockSizesWeights,
DifficultyAdjustmentEntry, HashrateSummary, PoolDetail, PoolInfo, PoolSlugParam, PoolsSummary, DifficultyAdjustment, DifficultyAdjustmentEntry, HashrateSummary, PoolDetail,
PoolHashrateEntry, PoolInfo, PoolSlugAndHeightParam, PoolSlugParam, PoolsSummary,
RewardStats, TimePeriodParam, RewardStats, TimePeriodParam,
}; };
@@ -95,6 +96,94 @@ impl MiningRoutes for ApiRouter<AppState> {
}, },
), ),
) )
.api_route(
"/api/v1/mining/pool/{slug}/blocks",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(path.slug, None)).await
},
|op| {
op.id("get_pool_blocks")
.mining_tag()
.summary("Mining pool blocks")
.description("Get the 10 most recent blocks mined by a specific pool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/pool/{slug}/blocks/{height}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(PoolSlugAndHeightParam {slug, height}): Path<PoolSlugAndHeightParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(slug, Some(height))).await
},
|op| {
op.id("get_pool_blocks_from")
.mining_tag()
.summary("Mining pool blocks from height")
.description("Get 10 blocks mined by a specific pool before (and including) the given height.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/pool/{slug}/hashrate",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_hashrate(path.slug)).await
},
|op| {
op.id("get_pool_hashrate")
.mining_tag()
.summary("Mining pool hashrate")
.description("Get hashrate history for a specific mining pool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/hashrate/pools",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, |q| q.pools_hashrate(None)).await
},
|op| {
op.id("get_pools_hashrate")
.mining_tag()
.summary("All pools hashrate (all time)")
.description("Get hashrate data for all mining pools.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/hashrate/pools/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pools_hashrate(Some(path.time_period))).await
},
|op| {
op.id("get_pools_hashrate_by_period")
.mining_tag()
.summary("All pools hashrate")
.description("Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.server_error()
},
),
)
.api_route( .api_route(
"/api/v1/mining/hashrate", "/api/v1/mining/hashrate",
get_with( get_with(

View File

@@ -1,11 +1,16 @@
use aide::axum::{ApiRouter, routing::get_with}; use aide::axum::{
ApiRouter,
routing::{get_with, post_with},
};
use axum::{ use axum::{
extract::{Path, State}, extract::{Path, State},
http::{HeaderMap, Uri}, http::{HeaderMap, Uri},
response::Redirect,
routing::get,
}; };
use brk_types::{Hex, Transaction, TxOutspend, TxStatus, TxidParam, TxidVout}; use axum::extract::Query;
use brk_types::{
CpfpInfo, Hex, MerkleProof, Transaction, TxOutspend, TxStatus, Txid, TxidParam, TxidVout,
TxidsParam,
};
use crate::{CacheStrategy, extended::TransformResponseExtended}; use crate::{CacheStrategy, extended::TransformResponseExtended};
@@ -18,8 +23,6 @@ pub trait TxRoutes {
impl TxRoutes for ApiRouter<AppState> { impl TxRoutes for ApiRouter<AppState> {
fn add_tx_routes(self) -> Self { fn add_tx_routes(self) -> Self {
self self
.route("/api/tx", get(Redirect::temporary("/api/transactions")))
.route("/api/transactions", get(Redirect::temporary("/api#tag/transactions")))
.api_route( .api_route(
"/api/tx/{txid}", "/api/tx/{txid}",
get_with( get_with(
@@ -146,5 +149,92 @@ impl TxRoutes for ApiRouter<AppState> {
.server_error(), .server_error(),
), ),
) )
.api_route(
"/api/tx",
post_with(
async |State(state): State<AppState>, body: String| {
let hex = body.trim().to_string();
state.sync(|q| q.broadcast_transaction(&hex))
.map(|txid| txid.to_string())
.map_err(crate::Error::from)
},
|op| {
op.id("post_tx")
.transactions_tag()
.summary("Broadcast transaction")
.description("Broadcast a raw transaction to the network. The transaction should be provided as hex in the request body. The txid will be returned on success.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#post-transaction)*")
.ok_response::<Txid>()
.bad_request()
.server_error()
},
),
)
.api_route(
"/api/tx/{txid}/raw",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_bytes(&headers, CacheStrategy::Height, &uri, move |q| q.transaction_raw(txid)).await
},
|op| op
.id("get_tx_raw")
.transactions_tag()
.summary("Transaction raw")
.description("Returns a transaction as binary data.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*")
.ok_response::<Vec<u8>>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
.api_route(
"/api/tx/{txid}/merkle-proof",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.merkle_proof(txid)).await
},
|op| op
.id("get_tx_merkle_proof")
.transactions_tag()
.summary("Transaction merkle proof")
.description("Get the merkle inclusion proof for a transaction.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*")
.ok_response::<MerkleProof>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
.api_route(
"/api/v1/cpfp/{txid}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::MempoolHash(0), &uri, move |q| q.cpfp(txid)).await
},
|op| op
.id("get_cpfp")
.transactions_tag()
.summary("CPFP info")
.description("Returns ancestors and descendants for a CPFP transaction.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*")
.ok_response::<CpfpInfo>()
.not_found()
.server_error(),
),
)
.api_route(
"/api/v1/transaction-times",
get_with(
async |uri: Uri, headers: HeaderMap, Query(params): Query<TxidsParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::MempoolHash(0), &uri, move |q| q.transaction_times(&params.txids)).await
},
|op| op
.id("get_transaction_times")
.transactions_tag()
.summary("Transaction first-seen times")
.description("Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*")
.ok_response::<Vec<u64>>()
.server_error(),
),
)
} }
} }

View File

@@ -1,3 +1,4 @@
use aide::OperationOutput;
use axum::{ use axum::{
http::{StatusCode, header}, http::{StatusCode, header},
response::{IntoResponse, Response}, response::{IntoResponse, Response},
@@ -157,6 +158,10 @@ impl From<BrkError> for Error {
} }
} }
impl OperationOutput for Error {
type Inner = ();
}
impl IntoResponse for Error { impl IntoResponse for Error {
fn into_response(self) -> Response { fn into_response(self) -> Response {
let body = build_error_body(self.status, self.code, self.message); let body = build_error_body(self.status, self.code, self.message);

View File

@@ -2,15 +2,10 @@
pub enum Kind { pub enum Kind {
Recent, Recent,
Random, Random,
Sequential,
Vec, Vec,
} }
impl Kind { impl Kind {
pub fn is_sequential(&self) -> bool {
matches!(*self, Self::Sequential)
}
pub fn is_recent(&self) -> bool { pub fn is_recent(&self) -> bool {
matches!(*self, Self::Recent) matches!(*self, Self::Recent)
} }

View File

@@ -137,13 +137,6 @@ where
FilterPolicyEntry::Bloom(BloomConstructionPolicy::BitsPerKey(7.0)), FilterPolicyEntry::Bloom(BloomConstructionPolicy::BitsPerKey(7.0)),
])); ]));
} }
Kind::Sequential => {
options = options
.filter_block_partitioning_policy(PartitioningPolicy::all(true))
.index_block_partitioning_policy(PartitioningPolicy::all(true))
.filter_block_pinning_policy(PinningPolicy::all(false))
.index_block_pinning_policy(PinningPolicy::all(false));
}
Kind::Vec => { Kind::Vec => {
options = options options = options
.max_memtable_size(8 * 1024 * 1024) .max_memtable_size(8 * 1024 * 1024)

View File

@@ -1,11 +1,9 @@
use std::borrow::Cow;
use bitcoin::hashes::{Hash, HashEngine}; use bitcoin::hashes::{Hash, HashEngine};
use derive_more::Deref; use derive_more::Deref;
use crate::BlkMetadata; use crate::BlkMetadata;
use super::{BlockHash, Height}; use super::{BlockHash, CoinbaseTag, Height};
/// Raw block bytes and per-tx offsets for fast txid hashing. /// Raw block bytes and per-tx offsets for fast txid hashing.
/// Present when block was parsed from blk*.dat files, absent for RPC blocks. /// Present when block was parsed from blk*.dat files, absent for RPC blocks.
@@ -110,15 +108,15 @@ impl Block {
bitcoin::Txid::from_engine(engine) bitcoin::Txid::from_engine(engine)
} }
pub fn coinbase_tag(&self) -> Cow<'_, str> { pub fn coinbase_tag(&self) -> CoinbaseTag {
String::from_utf8_lossy( let bytes = self
self.txdata .txdata
.first() .first()
.and_then(|tx| tx.input.first()) .and_then(|tx| tx.input.first())
.unwrap() .unwrap()
.script_sig .script_sig
.as_bytes(), .as_bytes();
) CoinbaseTag::from(bytes)
} }
} }

View File

@@ -0,0 +1,109 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockPool, FeeRate, Sats, Weight};
/// Extended block data matching mempool.space /api/v1/blocks extras.
///
/// All fields serialize in camelCase to mirror the upstream API
/// (`rename_all` on the container replaces per-field renames; single-word
/// fields like `reward`, `pool`, and `header` are unchanged by it).
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockExtras {
    /// Total fees in satoshis
    pub total_fees: Sats,
    /// Median fee rate in sat/vB
    pub median_fee: FeeRate,
    /// Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max]
    pub fee_range: [FeeRate; 7],
    /// Total block reward (subsidy + fees) in satoshis
    pub reward: Sats,
    /// Mining pool that mined this block
    pub pool: BlockPool,
    /// Average fee per transaction in satoshis
    pub avg_fee: Sats,
    /// Average fee rate in sat/vB
    pub avg_fee_rate: FeeRate,
    /// Raw coinbase transaction scriptsig as hex
    pub coinbase_raw: String,
    /// Primary coinbase output address
    pub coinbase_address: Option<String>,
    /// All coinbase output addresses
    pub coinbase_addresses: Vec<String>,
    /// Coinbase output script in ASM format
    pub coinbase_signature: String,
    /// Coinbase scriptsig decoded as ASCII
    pub coinbase_signature_ascii: String,
    /// Average transaction size in bytes
    pub avg_tx_size: f64,
    /// Total number of inputs (excluding coinbase)
    pub total_inputs: u64,
    /// Total number of outputs
    pub total_outputs: u64,
    /// Total output amount in satoshis
    pub total_output_amt: Sats,
    /// Median fee amount in satoshis
    pub median_fee_amt: Sats,
    /// Fee amount percentiles in satoshis: [min, 10%, 25%, 50%, 75%, 90%, max]
    pub fee_percentiles: [Sats; 7],
    /// Number of segwit transactions
    pub segwit_total_txs: u32,
    /// Total size of segwit transactions in bytes
    pub segwit_total_size: u64,
    /// Total weight of segwit transactions
    pub segwit_total_weight: Weight,
    /// Raw 80-byte block header as hex
    pub header: String,
    /// UTXO set change (outputs created minus inputs spent)
    pub utxo_set_change: i64,
    /// Total UTXO set size at this height
    pub utxo_set_size: u64,
    /// Total input amount in satoshis
    pub total_input_amt: Sats,
    /// Virtual size in vbytes
    pub virtual_size: f64,
}

View File

@@ -0,0 +1,43 @@
use bitcoin::block::Header;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::BlockHash;
/// Block header matching mempool.space's format.
/// Contains the same fields as bitcoin::block::Header
/// but serialized for the JSON API.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockHeader {
    /// Block version, used for soft fork signaling
    // NOTE(review): consensus treats version as i32; see the cast in `From` below.
    pub version: u32,
    /// Previous block hash
    #[serde(rename = "previousblockhash")]
    pub previous_block_hash: BlockHash,
    /// Merkle root of the transaction tree
    pub merkle_root: String,
    /// Block timestamp as claimed by the miner (Unix time)
    pub time: u32,
    /// Compact target (bits)
    pub bits: u32,
    /// Nonce used to produce a valid block hash
    pub nonce: u32,
}

impl From<Header> for BlockHeader {
    fn from(h: Header) -> Self {
        Self {
            // NOTE(review): `to_consensus()` returns i32; a negative version
            // wraps to a large u32 here — confirm the upstream API expects
            // the unsigned reinterpretation.
            version: h.version.to_consensus() as u32,
            previous_block_hash: BlockHash::from(h.prev_blockhash),
            merkle_root: h.merkle_root.to_string(),
            time: h.time,
            bits: h.bits.to_consensus(),
            nonce: h.nonce,
        }
    }
}

View File

@@ -1,9 +1,9 @@
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{BlockHash, Height, PoolSlug, Timestamp, Weight}; use crate::{BlockHash, BlockHeader, Height, Timestamp, Weight};
/// Block information returned by the API /// Block information matching mempool.space /api/block/{hash}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockInfo { pub struct BlockInfo {
/// Block hash /// Block hash
@@ -12,6 +12,13 @@ pub struct BlockInfo {
/// Block height /// Block height
pub height: Height, pub height: Height,
/// Block header fields
#[serde(flatten)]
pub header: BlockHeader,
/// Block timestamp (Unix time)
pub timestamp: Timestamp,
/// Number of transactions in the block /// Number of transactions in the block
pub tx_count: u32, pub tx_count: u32,
@@ -21,45 +28,10 @@ pub struct BlockInfo {
/// Block weight in weight units /// Block weight in weight units
pub weight: Weight, pub weight: Weight,
/// Block timestamp (Unix time) /// Median time of the last 11 blocks
pub timestamp: Timestamp, #[serde(rename = "mediantime")]
pub median_time: Timestamp,
/// Block difficulty as a floating point number /// Block difficulty
pub difficulty: f64, pub difficulty: f64,
/// Extra block data (pool info, fee stats)
#[serde(skip_serializing_if = "Option::is_none")]
pub extras: Option<BlockExtras>,
}
/// Extra block data including pool identification and fee statistics
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockExtras {
/// Mining pool that mined this block
pub pool: BlockPool,
/// Total fees in satoshis
pub total_fees: u64,
/// Average fee per transaction in satoshis
pub avg_fee: u64,
/// Average fee rate in sat/vB
pub avg_fee_rate: u64,
/// Total block reward (subsidy + fees) in satoshis
pub reward: u64,
}
/// Mining pool identification for a block
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockPool {
/// Unique pool identifier
pub id: u8,
/// Pool name
pub name: String,
/// URL-friendly pool identifier
pub slug: PoolSlug,
} }

View File

@@ -0,0 +1,15 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockExtras, BlockInfo};
/// Block information with extras, matching mempool.space /api/v1/blocks
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockInfoV1 {
    /// Base block information
    // Flattened: BlockInfo's fields appear at the top level of the JSON object.
    #[serde(flatten)]
    pub info: BlockInfo,
    /// Extended block data (pool identification, fee statistics, ...)
    pub extras: BlockExtras,
}

View File

@@ -0,0 +1,17 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::PoolSlug;
/// Mining pool identification for a block.
/// Matches the `pool` object inside mempool.space block extras.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockPool {
    /// Unique pool identifier
    pub id: u8,
    /// Pool name
    pub name: String,
    /// URL-friendly pool identifier
    pub slug: PoolSlug,
}

View File

@@ -0,0 +1,86 @@
use derive_more::Deref;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use vecdb::{Bytes, Formattable};
/// Coinbase scriptSig tag for pool identification.
///
/// Stored as a fixed 101-byte record (1 byte length + 100 bytes data).
/// Uses `[u8; 101]` internally so that `size_of::<CoinbaseTag>()` matches
/// the serialized `Bytes::Array` size (vecdb requires this for alignment).
///
/// Bitcoin consensus limits coinbase scriptSig to 2-100 bytes.
// `Deref` (derive_more) exposes the raw 101-byte record INCLUDING the length
// prefix; use `tag_bytes()` / `as_str()` for just the tag payload.
#[derive(Debug, Deref, Clone, JsonSchema)]
pub struct CoinbaseTag(#[schemars(with = "String")] [u8; 101]);
impl Bytes for CoinbaseTag {
    type Array = [u8; 101];

    // The in-memory layout is exactly the on-disk layout, so vecdb can copy
    // records wholesale instead of encoding field-by-field.
    const IS_NATIVE_LAYOUT: bool = true;

    #[inline]
    fn to_bytes(&self) -> Self::Array {
        self.0
    }

    // Rebuilds a tag from stored bytes; fails unless exactly 101 bytes long.
    #[inline]
    fn from_bytes(bytes: &[u8]) -> vecdb::Result<Self> {
        let arr: [u8; 101] = bytes.try_into().map_err(|_| vecdb::Error::WrongLength {
            received: bytes.len(),
            expected: 101,
        })?;
        Ok(Self(arr))
    }
}
impl CoinbaseTag {
    /// Returns the tag as a UTF-8 string (lossy).
    #[inline]
    pub fn as_str(&self) -> std::borrow::Cow<'_, str> {
        // Delegate to tag_bytes() so the length logic lives in one place.
        String::from_utf8_lossy(self.tag_bytes())
    }

    /// Returns the tag bytes (without length prefix).
    #[inline]
    pub fn tag_bytes(&self) -> &[u8] {
        // Clamp the stored length: the prefix can never validly exceed 100.
        let len = usize::from(self.0[0]).min(100);
        &self.0[1..=len]
    }
}
impl From<&[u8]> for CoinbaseTag {
    #[inline]
    fn from(bytes: &[u8]) -> Self {
        // Consensus caps coinbase scriptSig at 100 bytes; truncate defensively.
        let n = bytes.len().min(100);
        let mut record = [0u8; 101];
        record[0] = n as u8;
        record[1..=n].copy_from_slice(&bytes[..n]);
        Self(record)
    }
}
impl Serialize for CoinbaseTag {
    // Serializes as the lossily UTF-8-decoded tag string (length prefix stripped).
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&self.as_str())
    }
}

impl<'de> Deserialize<'de> for CoinbaseTag {
    // Deserializes from a string; input beyond 100 bytes is silently truncated
    // (mirrors `From<&[u8]>`). Round-tripping non-UTF-8 tags is lossy.
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let s = String::deserialize(deserializer)?;
        Ok(Self::from(s.as_bytes()))
    }
}
impl Formattable for CoinbaseTag {
    /// Writes the raw tag bytes (no quoting, no escaping).
    fn write_to(&self, buf: &mut Vec<u8>) {
        buf.extend_from_slice(self.tag_bytes());
    }

    /// Writes the tag as a valid JSON string.
    ///
    /// The previous implementation dumped the raw scriptSig bytes between
    /// quotes, which produced invalid JSON whenever the tag contained `"`,
    /// `\`, control bytes, or non-UTF-8 sequences — all common in arbitrary
    /// coinbase data. Decode lossily (matching `Serialize`) and escape per
    /// RFC 8259.
    fn fmt_json(&self, buf: &mut Vec<u8>) {
        buf.push(b'"');
        // Lossy decode yields valid UTF-8 (bad sequences become U+FFFD),
        // so pushing its bytes directly is safe once escaped.
        for b in self.as_str().bytes() {
            match b {
                b'"' => buf.extend_from_slice(b"\\\""),
                b'\\' => buf.extend_from_slice(b"\\\\"),
                0x08 => buf.extend_from_slice(b"\\b"),
                0x09 => buf.extend_from_slice(b"\\t"),
                0x0A => buf.extend_from_slice(b"\\n"),
                0x0C => buf.extend_from_slice(b"\\f"),
                0x0D => buf.extend_from_slice(b"\\r"),
                // Remaining control bytes need the generic \u00XX form.
                0x00..=0x1F => buf.extend_from_slice(format!("\\u{:04x}", b).as_bytes()),
                _ => buf.push(b),
            }
        }
        buf.push(b'"');
    }
}

View File

@@ -0,0 +1,21 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{FeeRate, Sats, Txid, Weight};
/// CPFP (Child Pays For Parent) information for a transaction
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CpfpInfo {
    // Ancestor transactions in the fee package — presumably the unconfirmed
    // ancestors this tx pays for; TODO confirm against the query layer.
    pub ancestors: Vec<CpfpEntry>,
    // Descendant transactions in the fee package.
    pub descendants: Vec<CpfpEntry>,
    // Effective fee rate of the whole package (name suggests per-vsize).
    #[serde(rename = "effectiveFeePerVsize")]
    pub effective_fee_per_vsize: FeeRate,
}

/// A transaction in a CPFP relationship
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CpfpEntry {
    // Transaction id.
    pub txid: Txid,
    // Transaction weight in weight units.
    pub weight: Weight,
    // Absolute fee in satoshis.
    pub fee: Sats,
}

View File

@@ -0,0 +1,24 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::Dollars;
/// Historical price response
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct HistoricalPrice {
    // Price data points.
    pub prices: Vec<HistoricalPriceEntry>,
    // Always empty here — see ExchangeRates below.
    #[serde(rename = "exchangeRates")]
    pub exchange_rates: ExchangeRates,
}

/// A single price data point
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct HistoricalPriceEntry {
    // Unix timestamp of the data point.
    pub time: u64,
    // BTC/USD price at that time.
    #[serde(rename = "USD")]
    pub usd: Dollars,
}

/// Exchange rates (USD base, on-chain only — no fiat pairs available)
// Empty marker struct kept for wire-format compatibility with
// mempool.space, whose response includes an exchangeRates object.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct ExchangeRates {}

View File

@@ -24,9 +24,13 @@ mod blk_metadata;
mod blk_position; mod blk_position;
mod block; mod block;
mod block_count_param; mod block_count_param;
mod block_extras;
mod block_fee_rates_entry; mod block_fee_rates_entry;
mod block_fees_entry; mod block_fees_entry;
mod block_header;
mod block_info; mod block_info;
mod block_info_v1;
mod block_pool;
mod block_rewards_entry; mod block_rewards_entry;
mod block_size_entry; mod block_size_entry;
mod block_sizes_weights; mod block_sizes_weights;
@@ -44,6 +48,8 @@ mod cents_compact;
mod cents_sats; mod cents_sats;
mod cents_signed; mod cents_signed;
mod cents_squared_sats; mod cents_squared_sats;
mod coinbase_tag;
mod cpfp;
mod cost_basis_bucket; mod cost_basis_bucket;
mod cost_basis_distribution; mod cost_basis_distribution;
mod cost_basis_params; mod cost_basis_params;
@@ -76,6 +82,7 @@ mod hashrate_entry;
mod hashrate_summary; mod hashrate_summary;
mod health; mod health;
mod height; mod height;
mod historical_price;
mod height_param; mod height_param;
mod hex; mod hex;
mod hour1; mod hour1;
@@ -89,6 +96,8 @@ mod limit_param;
mod mempool_block; mod mempool_block;
mod mempool_entry_info; mod mempool_entry_info;
mod mempool_info; mod mempool_info;
mod mempool_recent_tx;
mod merkle_proof;
mod minute10; mod minute10;
mod minute30; mod minute30;
mod month1; mod month1;
@@ -124,6 +133,7 @@ mod percentile;
mod pool; mod pool;
mod pool_detail; mod pool_detail;
mod pool_info; mod pool_info;
mod pool_hashrate_entry;
mod pool_slug; mod pool_slug;
mod pool_slug_param; mod pool_slug_param;
mod pool_stats; mod pool_stats;
@@ -177,6 +187,7 @@ mod tx_with_hex;
mod txid; mod txid;
mod txid_param; mod txid_param;
mod txid_prefix; mod txid_prefix;
mod txids_param;
mod txid_vout; mod txid_vout;
mod txin; mod txin;
mod txin_index; mod txin_index;
@@ -219,9 +230,13 @@ pub use blk_metadata::*;
pub use blk_position::*; pub use blk_position::*;
pub use block::*; pub use block::*;
pub use block_count_param::*; pub use block_count_param::*;
pub use block_extras::*;
pub use block_fee_rates_entry::*; pub use block_fee_rates_entry::*;
pub use block_fees_entry::*; pub use block_fees_entry::*;
pub use block_header::*;
pub use block_info::*; pub use block_info::*;
pub use block_info_v1::*;
pub use block_pool::*;
pub use block_rewards_entry::*; pub use block_rewards_entry::*;
pub use block_size_entry::*; pub use block_size_entry::*;
pub use block_sizes_weights::*; pub use block_sizes_weights::*;
@@ -239,6 +254,8 @@ pub use cents_compact::*;
pub use cents_sats::*; pub use cents_sats::*;
pub use cents_signed::*; pub use cents_signed::*;
pub use cents_squared_sats::*; pub use cents_squared_sats::*;
pub use coinbase_tag::*;
pub use cpfp::*;
pub use cost_basis_bucket::*; pub use cost_basis_bucket::*;
pub use cost_basis_distribution::*; pub use cost_basis_distribution::*;
pub use cost_basis_params::*; pub use cost_basis_params::*;
@@ -271,6 +288,7 @@ pub use hashrate_entry::*;
pub use hashrate_summary::*; pub use hashrate_summary::*;
pub use health::*; pub use health::*;
pub use height::*; pub use height::*;
pub use historical_price::*;
pub use height_param::*; pub use height_param::*;
pub use hex::*; pub use hex::*;
pub use hour1::*; pub use hour1::*;
@@ -284,6 +302,8 @@ pub use limit_param::*;
pub use mempool_block::*; pub use mempool_block::*;
pub use mempool_entry_info::*; pub use mempool_entry_info::*;
pub use mempool_info::*; pub use mempool_info::*;
pub use mempool_recent_tx::*;
pub use merkle_proof::*;
pub use minute10::*; pub use minute10::*;
pub use minute30::*; pub use minute30::*;
pub use month1::*; pub use month1::*;
@@ -319,6 +339,7 @@ pub use percentile::*;
pub use pool::*; pub use pool::*;
pub use pool_detail::*; pub use pool_detail::*;
pub use pool_info::*; pub use pool_info::*;
pub use pool_hashrate_entry::*;
pub use pool_slug::*; pub use pool_slug::*;
pub use pool_slug_param::*; pub use pool_slug_param::*;
pub use pool_stats::*; pub use pool_stats::*;
@@ -372,6 +393,7 @@ pub use tx_with_hex::*;
pub use txid::*; pub use txid::*;
pub use txid_param::*; pub use txid_param::*;
pub use txid_prefix::*; pub use txid_prefix::*;
pub use txids_param::*;
pub use txid_vout::*; pub use txid_vout::*;
pub use txin::*; pub use txin::*;
pub use txin_index::*; pub use txin_index::*;

View File

@@ -1,9 +1,11 @@
use std::collections::BTreeMap;
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::{Sats, Transaction, VSize}; use crate::{FeeRate, Sats, Transaction, VSize};
/// Mempool statistics /// Mempool statistics with incrementally maintained fee histogram.
#[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MempoolInfo { pub struct MempoolInfo {
/// Number of transactions in the mempool /// Number of transactions in the mempool
@@ -12,28 +14,50 @@ pub struct MempoolInfo {
pub vsize: VSize, pub vsize: VSize,
/// Total fees of all transactions in the mempool (satoshis) /// Total fees of all transactions in the mempool (satoshis)
pub total_fee: Sats, pub total_fee: Sats,
/// Fee histogram: `[[fee_rate, vsize], ...]` sorted by descending fee rate
#[serde(
serialize_with = "serialize_fee_histogram",
deserialize_with = "deserialize_fee_histogram"
)]
pub fee_histogram: BTreeMap<FeeRate, VSize>,
} }
impl MempoolInfo { impl MempoolInfo {
/// Increment stats for a newly added transaction.
///
/// Fee must come from `MempoolEntryInfo` (Bitcoin Core) rather than `tx.fee`
/// because `tx.fee` may be 0 for chained mempool transactions where prevouts
/// cannot be looked up via `gettxout`.
#[inline] #[inline]
pub fn add(&mut self, tx: &Transaction, fee: Sats) { pub fn add(&mut self, tx: &Transaction, fee: Sats) {
self.count += 1; self.count += 1;
self.vsize += tx.vsize(); self.vsize += tx.vsize();
self.total_fee += fee; self.total_fee += fee;
let rate = FeeRate::from((fee, tx.vsize()));
*self.fee_histogram.entry(rate).or_insert(VSize::from(0u64)) += tx.vsize();
} }
/// Decrement stats for a removed transaction.
///
/// Fee must match the fee used when the transaction was added.
#[inline] #[inline]
pub fn remove(&mut self, tx: &Transaction, fee: Sats) { pub fn remove(&mut self, tx: &Transaction, fee: Sats) {
self.count -= 1; self.count -= 1;
self.vsize -= tx.vsize(); self.vsize -= tx.vsize();
self.total_fee -= fee; self.total_fee -= fee;
let rate = FeeRate::from((fee, tx.vsize()));
if let Some(v) = self.fee_histogram.get_mut(&rate) {
*v -= tx.vsize();
if u64::from(*v) == 0 {
self.fee_histogram.remove(&rate);
}
}
} }
} }
/// Serialize the fee histogram as `[[fee_rate, vsize], ...]`, highest
/// fee rate first (mempool.space-compatible wire format).
fn serialize_fee_histogram<S: Serializer>(
    map: &BTreeMap<FeeRate, VSize>,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    // BTreeMap iterates ascending; reverse for descending fee rate order.
    let mut pairs = Vec::with_capacity(map.len());
    for (rate, vsize) in map.iter().rev() {
        pairs.push((*rate, *vsize));
    }
    pairs.serialize(serializer)
}
/// Deserialize a `[[fee_rate, vsize], ...]` list back into the ordered map.
fn deserialize_fee_histogram<'de, D: Deserializer<'de>>(
    deserializer: D,
) -> Result<BTreeMap<FeeRate, VSize>, D::Error> {
    let pairs = <Vec<(FeeRate, VSize)>>::deserialize(deserializer)?;
    let mut map = BTreeMap::new();
    for (rate, vsize) in pairs {
        // Last entry wins on duplicate rates, matching `collect()` semantics.
        map.insert(rate, vsize);
    }
    Ok(map)
}

View File

@@ -0,0 +1,24 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Sats, Transaction, Txid, VSize};
/// Simplified mempool transaction for the recent transactions endpoint
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MempoolRecentTx {
    /// Transaction ID.
    pub txid: Txid,
    /// Transaction fee in satoshis.
    pub fee: Sats,
    /// Virtual size in vbytes.
    pub vsize: VSize,
    /// Sum of all output values in satoshis.
    pub value: Sats,
}
impl From<(&Txid, &Transaction)> for MempoolRecentTx {
fn from((txid, tx): (&Txid, &Transaction)) -> Self {
Self {
txid: txid.clone(),
fee: tx.fee,
vsize: tx.vsize(),
value: tx.output.iter().map(|o| o.value).sum(),
}
}
}

View File

@@ -0,0 +1,12 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::Height;
/// Merkle inclusion proof for a transaction
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MerkleProof {
    /// Height of the block containing the transaction.
    pub block_height: Height,
    /// Sibling hashes along the merkle branch, as hex strings.
    pub merkle: Vec<String>,
    /// Position of the transaction within the block (0-based).
    pub pos: usize,
}

View File

@@ -1,5 +1,3 @@
use std::ops::{Add, Div};
/// Standard percentile values used throughout BRK. /// Standard percentile values used throughout BRK.
pub const PERCENTILES: [u8; 19] = [ pub const PERCENTILES: [u8; 19] = [
5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95,
@@ -8,31 +6,13 @@ pub const PERCENTILES: [u8; 19] = [
/// Length of the PERCENTILES array. /// Length of the PERCENTILES array.
pub const PERCENTILES_LEN: usize = PERCENTILES.len(); pub const PERCENTILES_LEN: usize = PERCENTILES.len();
/// Get a percentile value from a sorted slice. /// Get a percentile value from a sorted slice using nearest-rank method.
/// ///
/// # Panics /// # Panics
/// Panics if the slice is empty. /// Panics if the slice is empty.
pub fn get_percentile<T>(sorted: &[T], percentile: f64) -> T pub fn get_percentile<T: Clone>(sorted: &[T], percentile: f64) -> T {
where
T: Clone + Div<usize, Output = T> + Add<T, Output = T>,
{
let len = sorted.len(); let len = sorted.len();
assert!(len > 0, "Cannot get percentile from empty slice");
if len == 0 { let index = ((len - 1) as f64 * percentile).round() as usize;
panic!("Cannot get percentile from empty slice"); sorted[index].clone()
} else if len == 1 {
sorted[0].clone()
} else {
let index = (len - 1) as f64 * percentile;
let fract = index.fract();
if fract != 0.0 {
let left = sorted.get(index as usize).unwrap().clone();
let right = sorted.get(index.ceil() as usize).unwrap().clone();
(left + right) / 2
} else {
sorted.get(index as usize).unwrap().clone()
}
}
} }

View File

@@ -0,0 +1,19 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::Timestamp;
/// A single pool hashrate data point.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct PoolHashrateEntry {
    /// Unix timestamp.
    pub timestamp: Timestamp,
    /// Average hashrate (H/s).
    // NOTE(review): u128 serializes as a JSON number that can exceed the
    // safe integer range of JS clients — confirm consumers handle this.
    #[serde(rename = "avgHashrate")]
    pub avg_hashrate: u128,
    /// Pool's share of total network hashrate.
    pub share: f64,
    /// Pool name.
    #[serde(rename = "poolName")]
    pub pool_name: String,
}

View File

@@ -1,9 +1,15 @@
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::Deserialize; use serde::Deserialize;
use super::PoolSlug; use super::{Height, PoolSlug};
#[derive(Deserialize, JsonSchema)] #[derive(Deserialize, JsonSchema)]
pub struct PoolSlugParam { pub struct PoolSlugParam {
pub slug: PoolSlug, pub slug: PoolSlug,
} }
/// Path parameters for endpoints keyed by pool slug and block height.
#[derive(Deserialize, JsonSchema)]
pub struct PoolSlugAndHeightParam {
    /// URL-friendly pool identifier.
    pub slug: PoolSlug,
    /// Block height.
    pub height: Height,
}

View File

@@ -7,3 +7,8 @@ use crate::Timestamp;
pub struct TimestampParam { pub struct TimestampParam {
pub timestamp: Timestamp, pub timestamp: Timestamp,
} }
/// Path/query parameter carrying an optional Unix timestamp.
#[derive(Deserialize, JsonSchema)]
pub struct OptionalTimestampParam {
    /// Unix timestamp; `None` when omitted by the caller.
    pub timestamp: Option<Timestamp>,
}

View File

@@ -0,0 +1,10 @@
use schemars::JsonSchema;
use serde::Deserialize;
use crate::Txid;
/// Query parameters carrying a list of transaction IDs.
#[derive(Deserialize, JsonSchema)]
pub struct TxidsParam {
    /// Transaction IDs, received as repeated `txId[]` query parameters.
    #[serde(rename = "txId[]")]
    pub txids: Vec<Txid>,
}

View File

@@ -13,7 +13,7 @@
Open-source, self-hostable on-chain analytics for Bitcoin. Block explorer, address index, and thousands of metrics, everything computed from your node, even the price. Open-source, self-hostable on-chain analytics for Bitcoin. Block explorer, address index, and thousands of metrics, everything computed from your node, even the price.
Similar to [Glassnode](https://glassnode.com) + [mempool.space](https://mempool.space) + [electrs](https://github.com/romanz/electrs) + [UTXO Oracle](https://utxo.live/oracle/) in a single package. Similar to [Glassnode](https://glassnode.com) + [mempool.space](https://mempool.space) + [UTXO Oracle](https://utxo.live/oracle/) in a single package.
[Bitview](https://bitview.space) is a free hosted instance of BRK. [Bitview](https://bitview.space) is a free hosted instance of BRK.

View File

@@ -107,6 +107,37 @@
* @typedef {Object} BlockCountParam * @typedef {Object} BlockCountParam
* @property {number} blockCount - Number of recent blocks to include * @property {number} blockCount - Number of recent blocks to include
*/ */
/**
* Extended block data matching mempool.space /api/v1/blocks extras
*
* @typedef {Object} BlockExtras
* @property {Sats} totalFees - Total fees in satoshis
* @property {FeeRate} medianFee - Median fee rate in sat/vB
* @property {FeeRate[]} feeRange - Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max]
* @property {Sats} reward - Total block reward (subsidy + fees) in satoshis
* @property {BlockPool} pool - Mining pool that mined this block
* @property {Sats} avgFee - Average fee per transaction in satoshis
* @property {FeeRate} avgFeeRate - Average fee rate in sat/vB
* @property {string} coinbaseRaw - Raw coinbase transaction scriptsig as hex
* @property {?string=} coinbaseAddress - Primary coinbase output address
* @property {string[]} coinbaseAddresses - All coinbase output addresses
* @property {string} coinbaseSignature - Coinbase output script in ASM format
* @property {string} coinbaseSignatureAscii - Coinbase scriptsig decoded as ASCII
* @property {number} avgTxSize - Average transaction size in bytes
* @property {number} totalInputs - Total number of inputs (excluding coinbase)
* @property {number} totalOutputs - Total number of outputs
* @property {Sats} totalOutputAmt - Total output amount in satoshis
* @property {Sats} medianFeeAmt - Median fee amount in satoshis
* @property {Sats[]} feePercentiles - Fee amount percentiles in satoshis: [min, 10%, 25%, 50%, 75%, 90%, max]
* @property {number} segwitTotalTxs - Number of segwit transactions
* @property {number} segwitTotalSize - Total size of segwit transactions in bytes
* @property {Weight} segwitTotalWeight - Total weight of segwit transactions
* @property {string} header - Raw 80-byte block header as hex
* @property {number} utxoSetChange - UTXO set change (outputs created minus inputs spent)
* @property {number} utxoSetSize - Total UTXO set size at this height
* @property {Sats} totalInputAmt - Total input amount in satoshis
* @property {number} virtualSize - Virtual size in vbytes
*/
/** /**
* A single block fees data point. * A single block fees data point.
* *
@@ -135,16 +166,51 @@
* @property {TxIndex} index - Transaction index within the block (0-based) * @property {TxIndex} index - Transaction index within the block (0-based)
*/ */
/** /**
* Block information returned by the API * Block information matching mempool.space /api/block/{hash}
* *
* @typedef {Object} BlockInfo * @typedef {Object} BlockInfo
* @property {BlockHash} id - Block hash * @property {BlockHash} id - Block hash
* @property {Height} height - Block height * @property {Height} height - Block height
* @property {number} version - Block version, used for soft fork signaling
* @property {BlockHash} previousblockhash - Previous block hash
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {number} time - Block timestamp as claimed by the miner (Unix time)
* @property {number} bits - Compact target (bits)
* @property {number} nonce - Nonce used to produce a valid block hash
* @property {Timestamp} timestamp - Block timestamp (Unix time)
* @property {number} txCount - Number of transactions in the block * @property {number} txCount - Number of transactions in the block
* @property {number} size - Block size in bytes * @property {number} size - Block size in bytes
* @property {Weight} weight - Block weight in weight units * @property {Weight} weight - Block weight in weight units
* @property {Timestamp} mediantime - Median time of the last 11 blocks
* @property {number} difficulty - Block difficulty
*/
/**
* Block information with extras, matching mempool.space /api/v1/blocks
*
* @typedef {Object} BlockInfoV1
* @property {BlockHash} id - Block hash
* @property {Height} height - Block height
* @property {number} version - Block version, used for soft fork signaling
* @property {BlockHash} previousblockhash - Previous block hash
* @property {string} merkleRoot - Merkle root of the transaction tree
* @property {number} time - Block timestamp as claimed by the miner (Unix time)
* @property {number} bits - Compact target (bits)
* @property {number} nonce - Nonce used to produce a valid block hash
* @property {Timestamp} timestamp - Block timestamp (Unix time) * @property {Timestamp} timestamp - Block timestamp (Unix time)
* @property {number} difficulty - Block difficulty as a floating point number * @property {number} txCount - Number of transactions in the block
* @property {number} size - Block size in bytes
* @property {Weight} weight - Block weight in weight units
* @property {Timestamp} mediantime - Median time of the last 11 blocks
* @property {number} difficulty - Block difficulty
* @property {BlockExtras} extras - Extended block data
*/
/**
* Mining pool identification for a block
*
* @typedef {Object} BlockPool
* @property {number} id - Unique pool identifier
* @property {string} name - Pool name
* @property {PoolSlug} slug - URL-friendly pool identifier
*/ */
/** /**
* A single block rewards data point. * A single block rewards data point.
@@ -228,6 +294,17 @@
* *
* @typedef {string} Cohort * @typedef {string} Cohort
*/ */
/**
* Coinbase scriptSig tag for pool identification.
*
* Stored as a fixed 101-byte record (1 byte length + 100 bytes data).
* Uses `[u8; 101]` internally so that `size_of::<CoinbaseTag>()` matches
* the serialized `Bytes::Array` size (vecdb requires this for alignment).
*
* Bitcoin consensus limits coinbase scriptSig to 2-100 bytes.
*
* @typedef {string} CoinbaseTag
*/
/** /**
* Bucket type for cost basis aggregation. * Bucket type for cost basis aggregation.
* Options: raw (no aggregation), lin200/lin500/lin1000 (linear $200/$500/$1000), * Options: raw (no aggregation), lin200/lin500/lin1000 (linear $200/$500/$1000),
@@ -261,6 +338,22 @@
* *
* @typedef {("supply"|"realized"|"unrealized")} CostBasisValue * @typedef {("supply"|"realized"|"unrealized")} CostBasisValue
*/ */
/**
* A transaction in a CPFP relationship
*
* @typedef {Object} CpfpEntry
* @property {Txid} txid
* @property {Weight} weight
* @property {Sats} fee
*/
/**
* CPFP (Child Pays For Parent) information for a transaction
*
* @typedef {Object} CpfpInfo
* @property {CpfpEntry[]} ancestors
* @property {CpfpEntry[]} descendants
* @property {FeeRate} effectiveFeePerVsize
*/
/** /**
* Data range with output format for API query parameters * Data range with output format for API query parameters
* *
@@ -357,6 +450,11 @@
* @property {string} message - Human-readable description * @property {string} message - Human-readable description
* @property {string} docUrl - Link to API documentation * @property {string} docUrl - Link to API documentation
*/ */
/**
* Exchange rates (USD base, on-chain only — no fiat pairs available)
*
* @typedef {Object} ExchangeRates
*/
/** /**
* Fee rate in sats/vB * Fee rate in sats/vB
* *
@@ -433,6 +531,20 @@
* *
* @typedef {Dollars} High * @typedef {Dollars} High
*/ */
/**
* Historical price response
*
* @typedef {Object} HistoricalPrice
* @property {HistoricalPriceEntry[]} prices
* @property {ExchangeRates} exchangeRates
*/
/**
* A single price data point
*
* @typedef {Object} HistoricalPriceEntry
* @property {number} time
 * @property {Dollars} USD - Price in US dollars
*/
/** @typedef {number} Hour1 */ /** @typedef {number} Hour1 */
/** @typedef {number} Hour12 */ /** @typedef {number} Hour12 */
/** @typedef {number} Hour4 */ /** @typedef {number} Hour4 */
@@ -484,12 +596,30 @@
* @property {FeeRate[]} feeRange - Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max] * @property {FeeRate[]} feeRange - Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max]
*/ */
/** /**
* Mempool statistics * Mempool statistics with incrementally maintained fee histogram.
* *
* @typedef {Object} MempoolInfo * @typedef {Object} MempoolInfo
* @property {number} count - Number of transactions in the mempool * @property {number} count - Number of transactions in the mempool
* @property {VSize} vsize - Total virtual size of all transactions in the mempool (vbytes) * @property {VSize} vsize - Total virtual size of all transactions in the mempool (vbytes)
* @property {Sats} totalFee - Total fees of all transactions in the mempool (satoshis) * @property {Sats} totalFee - Total fees of all transactions in the mempool (satoshis)
 * @property {[FeeRate, VSize][]} feeHistogram - Fee histogram: `[[fee_rate, vsize], ...]` sorted by descending fee rate
*/
/**
* Simplified mempool transaction for the recent transactions endpoint
*
* @typedef {Object} MempoolRecentTx
* @property {Txid} txid
* @property {Sats} fee
* @property {VSize} vsize
* @property {Sats} value
*/
/**
* Merkle inclusion proof for a transaction
*
* @typedef {Object} MerkleProof
* @property {Height} blockHeight
* @property {string[]} merkle
* @property {number} pos
*/ */
/** @typedef {number} Minute10 */ /** @typedef {number} Minute10 */
/** @typedef {number} Minute30 */ /** @typedef {number} Minute30 */
@@ -529,6 +659,10 @@
* *
* @typedef {Dollars} Open * @typedef {Dollars} Open
*/ */
/**
* @typedef {Object} OptionalTimestampParam
* @property {(Timestamp|null)=} timestamp
*/
/** @typedef {number} OutPoint */ /** @typedef {number} OutPoint */
/** /**
* Type (P2PKH, P2WPKH, P2SH, P2TR, etc.) * Type (P2PKH, P2WPKH, P2SH, P2TR, etc.)
@@ -607,6 +741,15 @@
* @property {string[]} regexes - Coinbase tag patterns (regexes) * @property {string[]} regexes - Coinbase tag patterns (regexes)
* @property {PoolSlug} slug - URL-friendly pool identifier * @property {PoolSlug} slug - URL-friendly pool identifier
*/ */
/**
* A single pool hashrate data point.
*
* @typedef {Object} PoolHashrateEntry
* @property {Timestamp} timestamp - Unix timestamp.
* @property {number} avgHashrate - Average hashrate (H/s).
* @property {number} share - Pool's share of total network hashrate.
* @property {string} poolName - Pool name.
*/
/** /**
* Basic pool information for listing all pools * Basic pool information for listing all pools
* *
@@ -616,6 +759,11 @@
* @property {number} uniqueId - Unique numeric pool identifier * @property {number} uniqueId - Unique numeric pool identifier
*/ */
/** @typedef {("unknown"|"blockfills"|"ultimuspool"|"terrapool"|"luxor"|"onethash"|"btccom"|"bitfarms"|"huobipool"|"wayicn"|"canoepool"|"btctop"|"bitcoincom"|"pool175btc"|"gbminers"|"axbt"|"asicminer"|"bitminter"|"bitcoinrussia"|"btcserv"|"simplecoinus"|"btcguild"|"eligius"|"ozcoin"|"eclipsemc"|"maxbtc"|"triplemining"|"coinlab"|"pool50btc"|"ghashio"|"stminingcorp"|"bitparking"|"mmpool"|"polmine"|"kncminer"|"bitalo"|"f2pool"|"hhtt"|"megabigpower"|"mtred"|"nmcbit"|"yourbtcnet"|"givemecoins"|"braiinspool"|"antpool"|"multicoinco"|"bcpoolio"|"cointerra"|"kanopool"|"solock"|"ckpool"|"nicehash"|"bitclub"|"bitcoinaffiliatenetwork"|"btcc"|"bwpool"|"exxbw"|"bitsolo"|"bitfury"|"twentyoneinc"|"digitalbtc"|"eightbaochi"|"mybtccoinpool"|"tbdice"|"hashpool"|"nexious"|"bravomining"|"hotpool"|"okexpool"|"bcmonster"|"onehash"|"bixin"|"tatmaspool"|"viabtc"|"connectbtc"|"batpool"|"waterhole"|"dcexploration"|"dcex"|"btpool"|"fiftyeightcoin"|"bitcoinindia"|"shawnp0wers"|"phashio"|"rigpool"|"haozhuzhu"|"sevenpool"|"miningkings"|"hashbx"|"dpool"|"rawpool"|"haominer"|"helix"|"bitcoinukraine"|"poolin"|"secretsuperstar"|"tigerpoolnet"|"sigmapoolcom"|"okpooltop"|"hummerpool"|"tangpool"|"bytepool"|"spiderpool"|"novablock"|"miningcity"|"binancepool"|"minerium"|"lubiancom"|"okkong"|"aaopool"|"emcdpool"|"foundryusa"|"sbicrypto"|"arkpool"|"purebtccom"|"marapool"|"kucoinpool"|"entrustcharitypool"|"okminer"|"titan"|"pegapool"|"btcnuggets"|"cloudhashing"|"digitalxmintsy"|"telco214"|"btcpoolparty"|"multipool"|"transactioncoinmining"|"btcdig"|"trickysbtcpool"|"btcmp"|"eobot"|"unomp"|"patels"|"gogreenlight"|"bitcoinindiapool"|"ekanembtc"|"canoe"|"tiger"|"onem1x"|"zulupool"|"secpool"|"ocean"|"whitepool"|"wiz"|"wk057"|"futurebitapollosolo"|"carbonnegative"|"portlandhodl"|"phoenix"|"neopool"|"maxipool"|"bitfufupool"|"gdpool"|"miningdutch"|"publicpool"|"miningsquared"|"innopolistech"|"btclab"|"parasite"|"redrockpool"|"est3lar")} PoolSlug */ /** @typedef 
{("unknown"|"blockfills"|"ultimuspool"|"terrapool"|"luxor"|"onethash"|"btccom"|"bitfarms"|"huobipool"|"wayicn"|"canoepool"|"btctop"|"bitcoincom"|"pool175btc"|"gbminers"|"axbt"|"asicminer"|"bitminter"|"bitcoinrussia"|"btcserv"|"simplecoinus"|"btcguild"|"eligius"|"ozcoin"|"eclipsemc"|"maxbtc"|"triplemining"|"coinlab"|"pool50btc"|"ghashio"|"stminingcorp"|"bitparking"|"mmpool"|"polmine"|"kncminer"|"bitalo"|"f2pool"|"hhtt"|"megabigpower"|"mtred"|"nmcbit"|"yourbtcnet"|"givemecoins"|"braiinspool"|"antpool"|"multicoinco"|"bcpoolio"|"cointerra"|"kanopool"|"solock"|"ckpool"|"nicehash"|"bitclub"|"bitcoinaffiliatenetwork"|"btcc"|"bwpool"|"exxbw"|"bitsolo"|"bitfury"|"twentyoneinc"|"digitalbtc"|"eightbaochi"|"mybtccoinpool"|"tbdice"|"hashpool"|"nexious"|"bravomining"|"hotpool"|"okexpool"|"bcmonster"|"onehash"|"bixin"|"tatmaspool"|"viabtc"|"connectbtc"|"batpool"|"waterhole"|"dcexploration"|"dcex"|"btpool"|"fiftyeightcoin"|"bitcoinindia"|"shawnp0wers"|"phashio"|"rigpool"|"haozhuzhu"|"sevenpool"|"miningkings"|"hashbx"|"dpool"|"rawpool"|"haominer"|"helix"|"bitcoinukraine"|"poolin"|"secretsuperstar"|"tigerpoolnet"|"sigmapoolcom"|"okpooltop"|"hummerpool"|"tangpool"|"bytepool"|"spiderpool"|"novablock"|"miningcity"|"binancepool"|"minerium"|"lubiancom"|"okkong"|"aaopool"|"emcdpool"|"foundryusa"|"sbicrypto"|"arkpool"|"purebtccom"|"marapool"|"kucoinpool"|"entrustcharitypool"|"okminer"|"titan"|"pegapool"|"btcnuggets"|"cloudhashing"|"digitalxmintsy"|"telco214"|"btcpoolparty"|"multipool"|"transactioncoinmining"|"btcdig"|"trickysbtcpool"|"btcmp"|"eobot"|"unomp"|"patels"|"gogreenlight"|"bitcoinindiapool"|"ekanembtc"|"canoe"|"tiger"|"onem1x"|"zulupool"|"secpool"|"ocean"|"whitepool"|"wiz"|"wk057"|"futurebitapollosolo"|"carbonnegative"|"portlandhodl"|"phoenix"|"neopool"|"maxipool"|"bitfufupool"|"gdpool"|"miningdutch"|"publicpool"|"miningsquared"|"innopolistech"|"btclab"|"parasite"|"redrockpool"|"est3lar")} PoolSlug */
/**
* @typedef {Object} PoolSlugAndHeightParam
* @property {PoolSlug} slug
* @property {Height} height
*/
/** /**
* @typedef {Object} PoolSlugParam * @typedef {Object} PoolSlugParam
* @property {PoolSlug} slug * @property {PoolSlug} slug
@@ -915,6 +1063,10 @@
* @property {Txid} txid - Transaction ID * @property {Txid} txid - Transaction ID
* @property {Vout} vout - Output index * @property {Vout} vout - Output index
*/ */
/**
* @typedef {Object} TxidsParam
 * @property {Txid[]} "txId[]" - Transaction IDs (repeated `txId[]` query parameters)
*/
/** /**
* Index within its type (e.g., 0 for first P2WPKH address) * Index within its type (e.g., 0 for first P2WPKH address)
* *
@@ -4159,7 +4311,6 @@ function createTransferPattern(client, acc) {
* @property {SeriesTree_Addrs} addrs * @property {SeriesTree_Addrs} addrs
* @property {SeriesTree_Scripts} scripts * @property {SeriesTree_Scripts} scripts
* @property {SeriesTree_Mining} mining * @property {SeriesTree_Mining} mining
* @property {SeriesTree_Positions} positions
* @property {SeriesTree_Cointime} cointime * @property {SeriesTree_Cointime} cointime
* @property {SeriesTree_Constants} constants * @property {SeriesTree_Constants} constants
* @property {SeriesTree_Indexes} indexes * @property {SeriesTree_Indexes} indexes
@@ -4175,10 +4326,14 @@ function createTransferPattern(client, acc) {
/** /**
* @typedef {Object} SeriesTree_Blocks * @typedef {Object} SeriesTree_Blocks
* @property {SeriesPattern18<BlockHash>} blockhash * @property {SeriesPattern18<BlockHash>} blockhash
* @property {SeriesPattern18<CoinbaseTag>} coinbaseTag
* @property {SeriesTree_Blocks_Difficulty} difficulty * @property {SeriesTree_Blocks_Difficulty} difficulty
* @property {SeriesTree_Blocks_Time} time * @property {SeriesTree_Blocks_Time} time
* @property {SeriesTree_Blocks_Size} size * @property {SeriesTree_Blocks_Size} size
* @property {AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern<Weight>} weight * @property {AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern<Weight>} weight
* @property {SeriesPattern18<StoredU32>} segwitTxs
* @property {SeriesPattern18<StoredU64>} segwitSize
* @property {SeriesPattern18<Weight>} segwitWeight
* @property {SeriesTree_Blocks_Count} count * @property {SeriesTree_Blocks_Count} count
* @property {SeriesTree_Blocks_Lookback} lookback * @property {SeriesTree_Blocks_Lookback} lookback
* @property {SeriesTree_Blocks_Interval} interval * @property {SeriesTree_Blocks_Interval} interval
@@ -4342,6 +4497,7 @@ function createTransferPattern(client, acc) {
* @property {SeriesPattern19<Sats>} outputValue * @property {SeriesPattern19<Sats>} outputValue
* @property {_6bBlockTxPattern<Sats>} fee * @property {_6bBlockTxPattern<Sats>} fee
* @property {_6bBlockTxPattern<FeeRate>} feeRate * @property {_6bBlockTxPattern<FeeRate>} feeRate
* @property {_6bBlockTxPattern<FeeRate>} effectiveFeeRate
*/ */
/** /**
@@ -4611,6 +4767,7 @@ function createTransferPattern(client, acc) {
* @property {AverageBlockCumulativeSumPattern3} coinbase * @property {AverageBlockCumulativeSumPattern3} coinbase
* @property {SeriesTree_Mining_Rewards_Subsidy} subsidy * @property {SeriesTree_Mining_Rewards_Subsidy} subsidy
* @property {SeriesTree_Mining_Rewards_Fees} fees * @property {SeriesTree_Mining_Rewards_Fees} fees
* @property {SeriesPattern18<Sats>} outputVolume
* @property {BlockCumulativePattern} unclaimed * @property {BlockCumulativePattern} unclaimed
*/ */
@@ -4671,10 +4828,6 @@ function createTransferPattern(client, acc) {
* @property {SeriesPattern1<StoredF64>} _1y * @property {SeriesPattern1<StoredF64>} _1y
*/ */
/**
* @typedef {Object} SeriesTree_Positions
*/
/** /**
* @typedef {Object} SeriesTree_Cointime * @typedef {Object} SeriesTree_Cointime
* @property {SeriesTree_Cointime_Activity} activity * @property {SeriesTree_Cointime_Activity} activity
@@ -7586,6 +7739,7 @@ class BrkClient extends BrkClientBase {
return { return {
blocks: { blocks: {
blockhash: createSeriesPattern18(this, 'blockhash'), blockhash: createSeriesPattern18(this, 'blockhash'),
coinbaseTag: createSeriesPattern18(this, 'coinbase_tag'),
difficulty: { difficulty: {
value: createSeriesPattern1(this, 'difficulty'), value: createSeriesPattern1(this, 'difficulty'),
hashrate: createSeriesPattern1(this, 'difficulty_hashrate'), hashrate: createSeriesPattern1(this, 'difficulty_hashrate'),
@@ -7611,6 +7765,9 @@ class BrkClient extends BrkClientBase {
pct90: create_1m1w1y24hPattern(this, 'block_size_pct90'), pct90: create_1m1w1y24hPattern(this, 'block_size_pct90'),
}, },
weight: createAverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern(this, 'block_weight'), weight: createAverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern(this, 'block_weight'),
segwitTxs: createSeriesPattern18(this, 'segwit_txs'),
segwitSize: createSeriesPattern18(this, 'segwit_size'),
segwitWeight: createSeriesPattern18(this, 'segwit_weight'),
count: { count: {
target: create_1m1w1y24hPattern(this, 'block_count_target'), target: create_1m1w1y24hPattern(this, 'block_count_target'),
total: createAverageBlockCumulativeSumPattern2(this, 'block_count'), total: createAverageBlockCumulativeSumPattern2(this, 'block_count'),
@@ -7709,6 +7866,7 @@ class BrkClient extends BrkClientBase {
outputValue: createSeriesPattern19(this, 'output_value'), outputValue: createSeriesPattern19(this, 'output_value'),
fee: create_6bBlockTxPattern(this, 'fee'), fee: create_6bBlockTxPattern(this, 'fee'),
feeRate: create_6bBlockTxPattern(this, 'fee_rate'), feeRate: create_6bBlockTxPattern(this, 'fee_rate'),
effectiveFeeRate: create_6bBlockTxPattern(this, 'effective_fee_rate'),
}, },
versions: { versions: {
v1: createAverageBlockCumulativeSumPattern(this, 'tx_v1'), v1: createAverageBlockCumulativeSumPattern(this, 'tx_v1'),
@@ -7907,6 +8065,7 @@ class BrkClient extends BrkClientBase {
_1y: createBpsRatioPattern2(this, 'fee_to_subsidy_ratio_1y'), _1y: createBpsRatioPattern2(this, 'fee_to_subsidy_ratio_1y'),
}, },
}, },
outputVolume: createSeriesPattern18(this, 'output_volume'),
unclaimed: createBlockCumulativePattern(this, 'unclaimed_rewards'), unclaimed: createBlockCumulativePattern(this, 'unclaimed_rewards'),
}, },
hashrate: { hashrate: {
@@ -7925,8 +8084,6 @@ class BrkClient extends BrkClientBase {
value: createPhsReboundThsPattern(this, 'hash_value'), value: createPhsReboundThsPattern(this, 'hash_value'),
}, },
}, },
positions: {
},
cointime: { cointime: {
activity: { activity: {
coinblocksCreated: createAverageBlockCumulativeSumPattern(this, 'coinblocks_created'), coinblocksCreated: createAverageBlockCumulativeSumPattern(this, 'coinblocks_created'),
@@ -9352,16 +9509,16 @@ class BrkClient extends BrkClientBase {
} }
/** /**
* Block by height * Block hash by height
* *
* Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count. * Retrieve the block hash at a given height. Returns the hash as plain text.
* *
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)* * *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*
* *
* Endpoint: `GET /api/block-height/{height}` * Endpoint: `GET /api/block-height/{height}`
* *
* @param {Height} height * @param {Height} height
* @returns {Promise<BlockInfo>} * @returns {Promise<BlockHash>}
*/ */
async getBlockByHeight(height) { async getBlockByHeight(height) {
return this.getJson(`/api/block-height/${height}`); return this.getJson(`/api/block-height/${height}`);
@@ -9383,6 +9540,22 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/block/${hash}`); return this.getJson(`/api/block/${hash}`);
} }
/**
* Block header
*
* Returns the hex-encoded block header.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*
*
* Endpoint: `GET /api/block/{hash}/header`
*
* @param {BlockHash} hash
* @returns {Promise<Hex>}
*/
async getBlockHeader(hash) {
return this.getJson(`/api/block/${hash}/header`);
}
/** /**
* Raw block * Raw block
* *
@@ -9479,6 +9652,34 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/blocks`); return this.getJson(`/api/blocks`);
} }
/**
* Block tip hash
*
* Returns the hash of the last block.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*
*
* Endpoint: `GET /api/blocks/tip/hash`
* @returns {Promise<BlockHash>}
*/
async getBlockTipHash() {
return this.getJson(`/api/blocks/tip/hash`);
}
/**
* Block tip height
*
* Returns the height of the last block.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*
*
* Endpoint: `GET /api/blocks/tip/height`
* @returns {Promise<Height>}
*/
async getBlockTipHeight() {
return this.getJson(`/api/blocks/tip/height`);
}
/** /**
* Blocks from height * Blocks from height
* *
@@ -9502,11 +9703,11 @@ class BrkClient extends BrkClientBase {
* *
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)* * *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)*
* *
* Endpoint: `GET /api/mempool/info` * Endpoint: `GET /api/mempool`
* @returns {Promise<MempoolInfo>} * @returns {Promise<MempoolInfo>}
*/ */
async getMempool() { async getMempool() {
return this.getJson(`/api/mempool/info`); return this.getJson(`/api/mempool`);
} }
/** /**
@@ -9521,6 +9722,20 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/mempool/price`); return this.getJson(`/api/mempool/price`);
} }
/**
* Recent mempool transactions
*
* Get the last 10 transactions to enter the mempool.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*
*
* Endpoint: `GET /api/mempool/recent`
* @returns {Promise<MempoolRecentTx[]>}
*/
async getMempoolRecent() {
return this.getJson(`/api/mempool/recent`);
}
/** /**
* Mempool transaction IDs * Mempool transaction IDs
* *
@@ -9867,6 +10082,22 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/tx/${txid}/hex`); return this.getJson(`/api/tx/${txid}/hex`);
} }
/**
* Transaction merkle proof
*
* Get the merkle inclusion proof for a transaction.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*
*
* Endpoint: `GET /api/tx/{txid}/merkle-proof`
*
* @param {Txid} txid
* @returns {Promise<MerkleProof>}
*/
async getTxMerkleProof(txid) {
return this.getJson(`/api/tx/${txid}/merkle-proof`);
}
/** /**
* Output spend status * Output spend status
* *
@@ -9900,6 +10131,22 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/tx/${txid}/outspends`); return this.getJson(`/api/tx/${txid}/outspends`);
} }
/**
* Transaction raw
*
* Returns a transaction as binary data.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*
*
* Endpoint: `GET /api/tx/{txid}/raw`
*
* @param {Txid} txid
* @returns {Promise<number[]>}
*/
async getTxRaw(txid) {
return this.getJson(`/api/tx/${txid}/raw`);
}
/** /**
* Transaction status * Transaction status
* *
@@ -9916,6 +10163,68 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/tx/${txid}/status`); return this.getJson(`/api/tx/${txid}/status`);
} }
/**
* Block (v1)
*
* Returns block details with extras by hash.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*
*
* Endpoint: `GET /api/v1/block/{hash}`
*
* @param {BlockHash} hash
* @returns {Promise<BlockInfoV1>}
*/
async getBlockV1(hash) {
return this.getJson(`/api/v1/block/${hash}`);
}
/**
* Recent blocks with extras
*
* Retrieve the last 10 blocks with extended data including pool identification and fee statistics.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
*
* Endpoint: `GET /api/v1/blocks`
* @returns {Promise<BlockInfoV1[]>}
*/
async getBlocksV1() {
return this.getJson(`/api/v1/blocks`);
}
/**
* Blocks from height with extras
*
* Retrieve up to 10 blocks with extended data going backwards from the given height.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
*
* Endpoint: `GET /api/v1/blocks/{height}`
*
* @param {Height} height
* @returns {Promise<BlockInfoV1[]>}
*/
async getBlocksV1FromHeight(height) {
return this.getJson(`/api/v1/blocks/${height}`);
}
/**
* CPFP info
*
* Returns ancestors and descendants for a CPFP transaction.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*
*
* Endpoint: `GET /api/v1/cpfp/{txid}`
*
* @param {Txid} txid
* @returns {Promise<CpfpInfo>}
*/
async getCpfp(txid) {
return this.getJson(`/api/v1/cpfp/${txid}`);
}
/** /**
* Difficulty adjustment * Difficulty adjustment
* *
@@ -9944,6 +10253,20 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/v1/fees/mempool-blocks`); return this.getJson(`/api/v1/fees/mempool-blocks`);
} }
/**
* Precise recommended fees
*
* Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*
*
* Endpoint: `GET /api/v1/fees/precise`
* @returns {Promise<RecommendedFees>}
*/
async getPreciseFees() {
return this.getJson(`/api/v1/fees/precise`);
}
/** /**
* Recommended fees * Recommended fees
* *
@@ -9958,6 +10281,26 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/v1/fees/recommended`); return this.getJson(`/api/v1/fees/recommended`);
} }
/**
* Historical price
*
* Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*
*
* Endpoint: `GET /api/v1/historical-price`
*
* @param {Timestamp=} [timestamp]
* @returns {Promise<HistoricalPrice>}
*/
async getHistoricalPrice(timestamp) {
const params = new URLSearchParams();
if (timestamp !== undefined) params.set('timestamp', String(timestamp));
const query = params.toString();
const path = `/api/v1/historical-price${query ? '?' + query : ''}`;
return this.getJson(path);
}
/** /**
* Block fee rates (WIP) * Block fee rates (WIP)
* *
@@ -10082,6 +10425,36 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/v1/mining/hashrate`); return this.getJson(`/api/v1/mining/hashrate`);
} }
/**
* All pools hashrate (all time)
*
* Get hashrate data for all mining pools.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
*
* Endpoint: `GET /api/v1/mining/hashrate/pools`
* @returns {Promise<PoolHashrateEntry[]>}
*/
async getPoolsHashrate() {
return this.getJson(`/api/v1/mining/hashrate/pools`);
}
/**
* All pools hashrate
*
* Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
*
* Endpoint: `GET /api/v1/mining/hashrate/pools/{time_period}`
*
* @param {TimePeriod} time_period
* @returns {Promise<PoolHashrateEntry[]>}
*/
async getPoolsHashrateByPeriod(time_period) {
return this.getJson(`/api/v1/mining/hashrate/pools/${time_period}`);
}
/** /**
* Network hashrate * Network hashrate
* *
@@ -10114,6 +10487,55 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/v1/mining/pool/${slug}`); return this.getJson(`/api/v1/mining/pool/${slug}`);
} }
/**
* Mining pool blocks
*
* Get the 10 most recent blocks mined by a specific pool.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
*
* Endpoint: `GET /api/v1/mining/pool/{slug}/blocks`
*
* @param {PoolSlug} slug
* @returns {Promise<BlockInfoV1[]>}
*/
async getPoolBlocks(slug) {
return this.getJson(`/api/v1/mining/pool/${slug}/blocks`);
}
/**
* Mining pool blocks from height
*
* Get 10 blocks mined by a specific pool before (and including) the given height.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
*
* Endpoint: `GET /api/v1/mining/pool/{slug}/blocks/{height}`
*
* @param {PoolSlug} slug
* @param {Height} height
* @returns {Promise<BlockInfoV1[]>}
*/
async getPoolBlocksFrom(slug, height) {
return this.getJson(`/api/v1/mining/pool/${slug}/blocks/${height}`);
}
/**
* Mining pool hashrate
*
* Get hashrate history for a specific mining pool.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*
*
* Endpoint: `GET /api/v1/mining/pool/{slug}/hashrate`
*
* @param {PoolSlug} slug
* @returns {Promise<PoolHashrateEntry[]>}
*/
async getPoolHashrate(slug) {
return this.getJson(`/api/v1/mining/pool/${slug}/hashrate`);
}
/** /**
* List all mining pools * List all mining pools
* *
@@ -10160,6 +10582,26 @@ class BrkClient extends BrkClientBase {
return this.getJson(`/api/v1/mining/reward-stats/${block_count}`); return this.getJson(`/api/v1/mining/reward-stats/${block_count}`);
} }
/**
* Transaction first-seen times
*
* Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.
*
* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*
*
* Endpoint: `GET /api/v1/transaction-times`
*
* @param {Txid[]} [txId[]]
* @returns {Promise<number[]>}
*/
async getTransactionTimes(txId) {
const params = new URLSearchParams();
params.set('txId[]', String(txId));
const query = params.toString();
const path = `/api/v1/transaction-times${query ? '?' + query : ''}`;
return this.getJson(path);
}
/** /**
* Validate address * Validate address
* *

View File

@@ -49,6 +49,11 @@ BasisPointsSigned16 = int
BasisPointsSigned32 = int BasisPointsSigned32 = int
# Bitcoin amount as floating point (1 BTC = 100,000,000 satoshis) # Bitcoin amount as floating point (1 BTC = 100,000,000 satoshis)
Bitcoin = float Bitcoin = float
PoolSlug = Literal["unknown", "blockfills", "ultimuspool", "terrapool", "luxor", "onethash", "btccom", "bitfarms", "huobipool", "wayicn", "canoepool", "btctop", "bitcoincom", "pool175btc", "gbminers", "axbt", "asicminer", "bitminter", "bitcoinrussia", "btcserv", "simplecoinus", "btcguild", "eligius", "ozcoin", "eclipsemc", "maxbtc", "triplemining", "coinlab", "pool50btc", "ghashio", "stminingcorp", "bitparking", "mmpool", "polmine", "kncminer", "bitalo", "f2pool", "hhtt", "megabigpower", "mtred", "nmcbit", "yourbtcnet", "givemecoins", "braiinspool", "antpool", "multicoinco", "bcpoolio", "cointerra", "kanopool", "solock", "ckpool", "nicehash", "bitclub", "bitcoinaffiliatenetwork", "btcc", "bwpool", "exxbw", "bitsolo", "bitfury", "twentyoneinc", "digitalbtc", "eightbaochi", "mybtccoinpool", "tbdice", "hashpool", "nexious", "bravomining", "hotpool", "okexpool", "bcmonster", "onehash", "bixin", "tatmaspool", "viabtc", "connectbtc", "batpool", "waterhole", "dcexploration", "dcex", "btpool", "fiftyeightcoin", "bitcoinindia", "shawnp0wers", "phashio", "rigpool", "haozhuzhu", "sevenpool", "miningkings", "hashbx", "dpool", "rawpool", "haominer", "helix", "bitcoinukraine", "poolin", "secretsuperstar", "tigerpoolnet", "sigmapoolcom", "okpooltop", "hummerpool", "tangpool", "bytepool", "spiderpool", "novablock", "miningcity", "binancepool", "minerium", "lubiancom", "okkong", "aaopool", "emcdpool", "foundryusa", "sbicrypto", "arkpool", "purebtccom", "marapool", "kucoinpool", "entrustcharitypool", "okminer", "titan", "pegapool", "btcnuggets", "cloudhashing", "digitalxmintsy", "telco214", "btcpoolparty", "multipool", "transactioncoinmining", "btcdig", "trickysbtcpool", "btcmp", "eobot", "unomp", "patels", "gogreenlight", "bitcoinindiapool", "ekanembtc", "canoe", "tiger", "onem1x", "zulupool", "secpool", "ocean", "whitepool", "wiz", "wk057", "futurebitapollosolo", "carbonnegative", "portlandhodl", "phoenix", "neopool", "maxipool", "bitfufupool", "gdpool", "miningdutch", 
"publicpool", "miningsquared", "innopolistech", "btclab", "parasite", "redrockpool", "est3lar"]
# Fee rate in sats/vB
FeeRate = float
# Transaction or block weight in weight units (WU)
Weight = int
# Block height # Block height
Height = int Height = int
# UNIX timestamp in seconds # UNIX timestamp in seconds
@@ -56,8 +61,6 @@ Timestamp = int
# Block hash # Block hash
BlockHash = str BlockHash = str
TxIndex = int TxIndex = int
# Transaction or block weight in weight units (WU)
Weight = int
# Unsigned cents (u64) - for values that should never be negative. # Unsigned cents (u64) - for values that should never be negative.
# Used for invested capital, realized cap, etc. # Used for invested capital, realized cap, etc.
Cents = int Cents = int
@@ -77,6 +80,14 @@ Dollars = float
Close = Dollars Close = Dollars
# Cohort identifier for cost basis distribution. # Cohort identifier for cost basis distribution.
Cohort = str Cohort = str
# Coinbase scriptSig tag for pool identification.
#
# Stored as a fixed 101-byte record (1 byte length + 100 bytes data).
# Uses `[u8; 101]` internally so that `size_of::<CoinbaseTag>()` matches
# the serialized `Bytes::Array` size (vecdb requires this for alignment).
#
# Bitcoin consensus limits coinbase scriptSig to 2-100 bytes.
CoinbaseTag = str
# Bucket type for cost basis aggregation. # Bucket type for cost basis aggregation.
# Options: raw (no aggregation), lin200/lin500/lin1000 (linear $200/$500/$1000), # Options: raw (no aggregation), lin200/lin500/lin1000 (linear $200/$500/$1000),
# log10/log50/log100/log200 (logarithmic with 10/50/100/200 buckets per decade). # log10/log50/log100/log200 (logarithmic with 10/50/100/200 buckets per decade).
@@ -97,8 +108,8 @@ Day3 = int
EmptyAddrIndex = TypeIndex EmptyAddrIndex = TypeIndex
EmptyOutputIndex = TypeIndex EmptyOutputIndex = TypeIndex
Epoch = int Epoch = int
# Fee rate in sats/vB # Exchange rates (USD base, on-chain only — no fiat pairs available)
FeeRate = float ExchangeRates = dict
FundedAddrIndex = TypeIndex FundedAddrIndex = TypeIndex
Halving = int Halving = int
# Hex-encoded string # Hex-encoded string
@@ -147,7 +158,6 @@ P2WPKHAddrIndex = TypeIndex
P2WPKHBytes = U8x20 P2WPKHBytes = U8x20
P2WSHAddrIndex = TypeIndex P2WSHAddrIndex = TypeIndex
P2WSHBytes = U8x32 P2WSHBytes = U8x32
PoolSlug = Literal["unknown", "blockfills", "ultimuspool", "terrapool", "luxor", "onethash", "btccom", "bitfarms", "huobipool", "wayicn", "canoepool", "btctop", "bitcoincom", "pool175btc", "gbminers", "axbt", "asicminer", "bitminter", "bitcoinrussia", "btcserv", "simplecoinus", "btcguild", "eligius", "ozcoin", "eclipsemc", "maxbtc", "triplemining", "coinlab", "pool50btc", "ghashio", "stminingcorp", "bitparking", "mmpool", "polmine", "kncminer", "bitalo", "f2pool", "hhtt", "megabigpower", "mtred", "nmcbit", "yourbtcnet", "givemecoins", "braiinspool", "antpool", "multicoinco", "bcpoolio", "cointerra", "kanopool", "solock", "ckpool", "nicehash", "bitclub", "bitcoinaffiliatenetwork", "btcc", "bwpool", "exxbw", "bitsolo", "bitfury", "twentyoneinc", "digitalbtc", "eightbaochi", "mybtccoinpool", "tbdice", "hashpool", "nexious", "bravomining", "hotpool", "okexpool", "bcmonster", "onehash", "bixin", "tatmaspool", "viabtc", "connectbtc", "batpool", "waterhole", "dcexploration", "dcex", "btpool", "fiftyeightcoin", "bitcoinindia", "shawnp0wers", "phashio", "rigpool", "haozhuzhu", "sevenpool", "miningkings", "hashbx", "dpool", "rawpool", "haominer", "helix", "bitcoinukraine", "poolin", "secretsuperstar", "tigerpoolnet", "sigmapoolcom", "okpooltop", "hummerpool", "tangpool", "bytepool", "spiderpool", "novablock", "miningcity", "binancepool", "minerium", "lubiancom", "okkong", "aaopool", "emcdpool", "foundryusa", "sbicrypto", "arkpool", "purebtccom", "marapool", "kucoinpool", "entrustcharitypool", "okminer", "titan", "pegapool", "btcnuggets", "cloudhashing", "digitalxmintsy", "telco214", "btcpoolparty", "multipool", "transactioncoinmining", "btcdig", "trickysbtcpool", "btcmp", "eobot", "unomp", "patels", "gogreenlight", "bitcoinindiapool", "ekanembtc", "canoe", "tiger", "onem1x", "zulupool", "secpool", "ocean", "whitepool", "wiz", "wk057", "futurebitapollosolo", "carbonnegative", "portlandhodl", "phoenix", "neopool", "maxipool", "bitfufupool", "gdpool", "miningdutch", 
"publicpool", "miningsquared", "innopolistech", "btclab", "parasite", "redrockpool", "est3lar"]
# Transaction locktime # Transaction locktime
RawLockTime = int RawLockTime = int
# Fractional satoshis (f64) - for representing USD prices in sats # Fractional satoshis (f64) - for representing USD prices in sats
@@ -296,6 +306,78 @@ class BlockCountParam(TypedDict):
""" """
block_count: int block_count: int
class BlockPool(TypedDict):
"""
Mining pool identification for a block
Attributes:
id: Unique pool identifier
name: Pool name
slug: URL-friendly pool identifier
"""
id: int
name: str
slug: PoolSlug
class BlockExtras(TypedDict):
"""
Extended block data matching mempool.space /api/v1/blocks extras
Attributes:
totalFees: Total fees in satoshis
medianFee: Median fee rate in sat/vB
feeRange: Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max]
reward: Total block reward (subsidy + fees) in satoshis
pool: Mining pool that mined this block
avgFee: Average fee per transaction in satoshis
avgFeeRate: Average fee rate in sat/vB
coinbaseRaw: Raw coinbase transaction scriptsig as hex
coinbaseAddress: Primary coinbase output address
coinbaseAddresses: All coinbase output addresses
coinbaseSignature: Coinbase output script in ASM format
coinbaseSignatureAscii: Coinbase scriptsig decoded as ASCII
avgTxSize: Average transaction size in bytes
totalInputs: Total number of inputs (excluding coinbase)
totalOutputs: Total number of outputs
totalOutputAmt: Total output amount in satoshis
medianFeeAmt: Median fee amount in satoshis
feePercentiles: Fee amount percentiles in satoshis: [min, 10%, 25%, 50%, 75%, 90%, max]
segwitTotalTxs: Number of segwit transactions
segwitTotalSize: Total size of segwit transactions in bytes
segwitTotalWeight: Total weight of segwit transactions
header: Raw 80-byte block header as hex
utxoSetChange: UTXO set change (outputs created minus inputs spent)
utxoSetSize: Total UTXO set size at this height
totalInputAmt: Total input amount in satoshis
virtualSize: Virtual size in vbytes
"""
totalFees: Sats
medianFee: FeeRate
feeRange: List[FeeRate]
reward: Sats
pool: BlockPool
avgFee: Sats
avgFeeRate: FeeRate
coinbaseRaw: str
coinbaseAddress: Optional[str]
coinbaseAddresses: List[str]
coinbaseSignature: str
coinbaseSignatureAscii: str
avgTxSize: float
totalInputs: int
totalOutputs: int
totalOutputAmt: Sats
medianFeeAmt: Sats
feePercentiles: List[Sats]
segwitTotalTxs: int
segwitTotalSize: int
segwitTotalWeight: Weight
header: str
utxoSetChange: int
utxoSetSize: int
totalInputAmt: Sats
virtualSize: float
class BlockFeesEntry(TypedDict): class BlockFeesEntry(TypedDict):
""" """
A single block fees data point. A single block fees data point.
@@ -327,25 +409,76 @@ class BlockHashTxIndex(TypedDict):
class BlockInfo(TypedDict): class BlockInfo(TypedDict):
""" """
Block information returned by the API Block information matching mempool.space /api/block/{hash}
Attributes: Attributes:
id: Block hash id: Block hash
height: Block height height: Block height
version: Block version, used for soft fork signaling
previousblockhash: Previous block hash
merkle_root: Merkle root of the transaction tree
time: Block timestamp as claimed by the miner (Unix time)
bits: Compact target (bits)
nonce: Nonce used to produce a valid block hash
timestamp: Block timestamp (Unix time)
tx_count: Number of transactions in the block tx_count: Number of transactions in the block
size: Block size in bytes size: Block size in bytes
weight: Block weight in weight units weight: Block weight in weight units
timestamp: Block timestamp (Unix time) mediantime: Median time of the last 11 blocks
difficulty: Block difficulty as a floating point number difficulty: Block difficulty
""" """
id: BlockHash id: BlockHash
height: Height height: Height
version: int
previousblockhash: BlockHash
merkle_root: str
time: int
bits: int
nonce: int
timestamp: Timestamp
tx_count: int tx_count: int
size: int size: int
weight: Weight weight: Weight
timestamp: Timestamp mediantime: Timestamp
difficulty: float difficulty: float
class BlockInfoV1(TypedDict):
"""
Block information with extras, matching mempool.space /api/v1/blocks
Attributes:
id: Block hash
height: Block height
version: Block version, used for soft fork signaling
previousblockhash: Previous block hash
merkle_root: Merkle root of the transaction tree
time: Block timestamp as claimed by the miner (Unix time)
bits: Compact target (bits)
nonce: Nonce used to produce a valid block hash
timestamp: Block timestamp (Unix time)
tx_count: Number of transactions in the block
size: Block size in bytes
weight: Block weight in weight units
mediantime: Median time of the last 11 blocks
difficulty: Block difficulty
extras: Extended block data
"""
id: BlockHash
height: Height
version: int
previousblockhash: BlockHash
merkle_root: str
time: int
bits: int
nonce: int
timestamp: Timestamp
tx_count: int
size: int
weight: Weight
mediantime: Timestamp
difficulty: float
extras: BlockExtras
class BlockRewardsEntry(TypedDict): class BlockRewardsEntry(TypedDict):
""" """
A single block rewards data point. A single block rewards data point.
@@ -427,6 +560,22 @@ class CostBasisQuery(TypedDict):
bucket: CostBasisBucket bucket: CostBasisBucket
value: CostBasisValue value: CostBasisValue
class CpfpEntry(TypedDict):
"""
A transaction in a CPFP relationship
"""
txid: Txid
weight: Weight
fee: Sats
class CpfpInfo(TypedDict):
"""
CPFP (Child Pays For Parent) information for a transaction
"""
ancestors: List[CpfpEntry]
descendants: List[CpfpEntry]
effectiveFeePerVsize: FeeRate
class DataRangeFormat(TypedDict): class DataRangeFormat(TypedDict):
""" """
Data range with output format for API query parameters Data range with output format for API query parameters
@@ -647,6 +796,20 @@ class Health(TypedDict):
class HeightParam(TypedDict): class HeightParam(TypedDict):
height: Height height: Height
class HistoricalPriceEntry(TypedDict):
"""
A single price data point
"""
time: int
USD: Dollars
class HistoricalPrice(TypedDict):
"""
Historical price response
"""
prices: List[HistoricalPriceEntry]
exchangeRates: ExchangeRates
class IndexInfo(TypedDict): class IndexInfo(TypedDict):
""" """
Information about an available index and its query aliases Information about an available index and its query aliases
@@ -692,16 +855,35 @@ class MempoolBlock(TypedDict):
class MempoolInfo(TypedDict): class MempoolInfo(TypedDict):
""" """
Mempool statistics Mempool statistics with incrementally maintained fee histogram.
Attributes: Attributes:
count: Number of transactions in the mempool count: Number of transactions in the mempool
vsize: Total virtual size of all transactions in the mempool (vbytes) vsize: Total virtual size of all transactions in the mempool (vbytes)
total_fee: Total fees of all transactions in the mempool (satoshis) total_fee: Total fees of all transactions in the mempool (satoshis)
fee_histogram: Fee histogram: `[[fee_rate, vsize], ...]` sorted by descending fee rate
""" """
count: int count: int
vsize: VSize vsize: VSize
total_fee: Sats total_fee: Sats
fee_histogram: dict[str, VSize]
class MempoolRecentTx(TypedDict):
"""
Simplified mempool transaction for the recent transactions endpoint
"""
txid: Txid
fee: Sats
vsize: VSize
value: Sats
class MerkleProof(TypedDict):
"""
Merkle inclusion proof for a transaction
"""
block_height: Height
merkle: List[str]
pos: int
class OHLCCents(TypedDict): class OHLCCents(TypedDict):
""" """
@@ -730,6 +912,9 @@ class OHLCSats(TypedDict):
low: Low low: Low
close: Close close: Close
class OptionalTimestampParam(TypedDict):
timestamp: Union[Timestamp, None]
class PaginatedSeries(TypedDict): class PaginatedSeries(TypedDict):
""" """
A paginated list of available series names (1000 per page) A paginated list of available series names (1000 per page)
@@ -822,6 +1007,21 @@ class PoolDetail(TypedDict):
estimatedHashrate: int estimatedHashrate: int
reportedHashrate: Optional[int] reportedHashrate: Optional[int]
class PoolHashrateEntry(TypedDict):
"""
A single pool hashrate data point.
Attributes:
timestamp: Unix timestamp.
avgHashrate: Average hashrate (H/s).
share: Pool's share of total network hashrate.
poolName: Pool name.
"""
timestamp: Timestamp
avgHashrate: int
share: float
poolName: str
class PoolInfo(TypedDict): class PoolInfo(TypedDict):
""" """
Basic pool information for listing all pools Basic pool information for listing all pools
@@ -835,6 +1035,10 @@ class PoolInfo(TypedDict):
slug: PoolSlug slug: PoolSlug
unique_id: int unique_id: int
class PoolSlugAndHeightParam(TypedDict):
slug: PoolSlug
height: Height
class PoolSlugParam(TypedDict): class PoolSlugParam(TypedDict):
slug: PoolSlug slug: PoolSlug
@@ -1110,6 +1314,9 @@ class TxidVout(TypedDict):
txid: Txid txid: Txid
vout: Vout vout: Vout
class TxidsParam(TypedDict):
txId: List[Txid]
class Utxo(TypedDict): class Utxo(TypedDict):
""" """
Unspent transaction output Unspent transaction output
@@ -3357,10 +3564,14 @@ class SeriesTree_Blocks:
def __init__(self, client: BrkClientBase, base_path: str = ''): def __init__(self, client: BrkClientBase, base_path: str = ''):
self.blockhash: SeriesPattern18[BlockHash] = SeriesPattern18(client, 'blockhash') self.blockhash: SeriesPattern18[BlockHash] = SeriesPattern18(client, 'blockhash')
self.coinbase_tag: SeriesPattern18[CoinbaseTag] = SeriesPattern18(client, 'coinbase_tag')
self.difficulty: SeriesTree_Blocks_Difficulty = SeriesTree_Blocks_Difficulty(client) self.difficulty: SeriesTree_Blocks_Difficulty = SeriesTree_Blocks_Difficulty(client)
self.time: SeriesTree_Blocks_Time = SeriesTree_Blocks_Time(client) self.time: SeriesTree_Blocks_Time = SeriesTree_Blocks_Time(client)
self.size: SeriesTree_Blocks_Size = SeriesTree_Blocks_Size(client) self.size: SeriesTree_Blocks_Size = SeriesTree_Blocks_Size(client)
self.weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern[Weight] = AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern(client, 'block_weight') self.weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern[Weight] = AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern(client, 'block_weight')
self.segwit_txs: SeriesPattern18[StoredU32] = SeriesPattern18(client, 'segwit_txs')
self.segwit_size: SeriesPattern18[StoredU64] = SeriesPattern18(client, 'segwit_size')
self.segwit_weight: SeriesPattern18[Weight] = SeriesPattern18(client, 'segwit_weight')
self.count: SeriesTree_Blocks_Count = SeriesTree_Blocks_Count(client) self.count: SeriesTree_Blocks_Count = SeriesTree_Blocks_Count(client)
self.lookback: SeriesTree_Blocks_Lookback = SeriesTree_Blocks_Lookback(client) self.lookback: SeriesTree_Blocks_Lookback = SeriesTree_Blocks_Lookback(client)
self.interval: SeriesTree_Blocks_Interval = SeriesTree_Blocks_Interval(client) self.interval: SeriesTree_Blocks_Interval = SeriesTree_Blocks_Interval(client)
@@ -3413,6 +3624,7 @@ class SeriesTree_Transactions_Fees:
self.output_value: SeriesPattern19[Sats] = SeriesPattern19(client, 'output_value') self.output_value: SeriesPattern19[Sats] = SeriesPattern19(client, 'output_value')
self.fee: _6bBlockTxPattern[Sats] = _6bBlockTxPattern(client, 'fee') self.fee: _6bBlockTxPattern[Sats] = _6bBlockTxPattern(client, 'fee')
self.fee_rate: _6bBlockTxPattern[FeeRate] = _6bBlockTxPattern(client, 'fee_rate') self.fee_rate: _6bBlockTxPattern[FeeRate] = _6bBlockTxPattern(client, 'fee_rate')
self.effective_fee_rate: _6bBlockTxPattern[FeeRate] = _6bBlockTxPattern(client, 'effective_fee_rate')
class SeriesTree_Transactions_Versions: class SeriesTree_Transactions_Versions:
"""Series tree node.""" """Series tree node."""
@@ -3757,6 +3969,7 @@ class SeriesTree_Mining_Rewards:
self.coinbase: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, 'coinbase') self.coinbase: AverageBlockCumulativeSumPattern3 = AverageBlockCumulativeSumPattern3(client, 'coinbase')
self.subsidy: SeriesTree_Mining_Rewards_Subsidy = SeriesTree_Mining_Rewards_Subsidy(client) self.subsidy: SeriesTree_Mining_Rewards_Subsidy = SeriesTree_Mining_Rewards_Subsidy(client)
self.fees: SeriesTree_Mining_Rewards_Fees = SeriesTree_Mining_Rewards_Fees(client) self.fees: SeriesTree_Mining_Rewards_Fees = SeriesTree_Mining_Rewards_Fees(client)
self.output_volume: SeriesPattern18[Sats] = SeriesPattern18(client, 'output_volume')
self.unclaimed: BlockCumulativePattern = BlockCumulativePattern(client, 'unclaimed_rewards') self.unclaimed: BlockCumulativePattern = BlockCumulativePattern(client, 'unclaimed_rewards')
class SeriesTree_Mining_Hashrate_Rate_Sma: class SeriesTree_Mining_Hashrate_Rate_Sma:
@@ -3792,12 +4005,6 @@ class SeriesTree_Mining:
self.rewards: SeriesTree_Mining_Rewards = SeriesTree_Mining_Rewards(client) self.rewards: SeriesTree_Mining_Rewards = SeriesTree_Mining_Rewards(client)
self.hashrate: SeriesTree_Mining_Hashrate = SeriesTree_Mining_Hashrate(client) self.hashrate: SeriesTree_Mining_Hashrate = SeriesTree_Mining_Hashrate(client)
class SeriesTree_Positions:
"""Series tree node."""
def __init__(self, client: BrkClientBase, base_path: str = ''):
pass
class SeriesTree_Cointime_Activity: class SeriesTree_Cointime_Activity:
"""Series tree node.""" """Series tree node."""
@@ -5664,7 +5871,6 @@ class SeriesTree:
self.addrs: SeriesTree_Addrs = SeriesTree_Addrs(client) self.addrs: SeriesTree_Addrs = SeriesTree_Addrs(client)
self.scripts: SeriesTree_Scripts = SeriesTree_Scripts(client) self.scripts: SeriesTree_Scripts = SeriesTree_Scripts(client)
self.mining: SeriesTree_Mining = SeriesTree_Mining(client) self.mining: SeriesTree_Mining = SeriesTree_Mining(client)
self.positions: SeriesTree_Positions = SeriesTree_Positions(client)
self.cointime: SeriesTree_Cointime = SeriesTree_Cointime(client) self.cointime: SeriesTree_Cointime = SeriesTree_Cointime(client)
self.constants: SeriesTree_Constants = SeriesTree_Constants(client) self.constants: SeriesTree_Constants = SeriesTree_Constants(client)
self.indexes: SeriesTree_Indexes = SeriesTree_Indexes(client) self.indexes: SeriesTree_Indexes = SeriesTree_Indexes(client)
@@ -6917,10 +7123,10 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/address/{address}/utxo`""" Endpoint: `GET /api/address/{address}/utxo`"""
return self.get_json(f'/api/address/{address}/utxo') return self.get_json(f'/api/address/{address}/utxo')
def get_block_by_height(self, height: Height) -> BlockInfo: def get_block_by_height(self, height: Height) -> BlockHash:
"""Block by height. """Block hash by height.
Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count. Retrieve the block hash at a given height. Returns the hash as plain text.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*
@@ -6937,6 +7143,16 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/block/{hash}`""" Endpoint: `GET /api/block/{hash}`"""
return self.get_json(f'/api/block/{hash}') return self.get_json(f'/api/block/{hash}')
def get_block_header(self, hash: BlockHash) -> Hex:
"""Block header.
Returns the hex-encoded block header.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*
Endpoint: `GET /api/block/{hash}/header`"""
return self.get_json(f'/api/block/{hash}/header')
def get_block_raw(self, hash: BlockHash) -> List[float]: def get_block_raw(self, hash: BlockHash) -> List[float]:
"""Raw block. """Raw block.
@@ -6997,6 +7213,26 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/blocks`""" Endpoint: `GET /api/blocks`"""
return self.get_json('/api/blocks') return self.get_json('/api/blocks')
def get_block_tip_hash(self) -> BlockHash:
"""Block tip hash.
Returns the hash of the last block.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*
Endpoint: `GET /api/blocks/tip/hash`"""
return self.get_json('/api/blocks/tip/hash')
def get_block_tip_height(self) -> Height:
"""Block tip height.
Returns the height of the last block.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*
Endpoint: `GET /api/blocks/tip/height`"""
return self.get_json('/api/blocks/tip/height')
def get_blocks_from_height(self, height: Height) -> List[BlockInfo]: def get_blocks_from_height(self, height: Height) -> List[BlockInfo]:
"""Blocks from height. """Blocks from height.
@@ -7014,8 +7250,8 @@ class BrkClient(BrkClientBase):
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)* *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)*
Endpoint: `GET /api/mempool/info`""" Endpoint: `GET /api/mempool`"""
return self.get_json('/api/mempool/info') return self.get_json('/api/mempool')
def get_live_price(self) -> Dollars: def get_live_price(self) -> Dollars:
"""Live BTC/USD price. """Live BTC/USD price.
@@ -7025,6 +7261,16 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/mempool/price`""" Endpoint: `GET /api/mempool/price`"""
return self.get_json('/api/mempool/price') return self.get_json('/api/mempool/price')
def get_mempool_recent(self) -> List[MempoolRecentTx]:
"""Recent mempool transactions.
Get the last 10 transactions to enter the mempool.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*
Endpoint: `GET /api/mempool/recent`"""
return self.get_json('/api/mempool/recent')
def get_mempool_txids(self) -> List[Txid]: def get_mempool_txids(self) -> List[Txid]:
"""Mempool transaction IDs. """Mempool transaction IDs.
@@ -7239,6 +7485,16 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/tx/{txid}/hex`""" Endpoint: `GET /api/tx/{txid}/hex`"""
return self.get_json(f'/api/tx/{txid}/hex') return self.get_json(f'/api/tx/{txid}/hex')
def get_tx_merkle_proof(self, txid: Txid) -> MerkleProof:
"""Transaction merkle proof.
Get the merkle inclusion proof for a transaction.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*
Endpoint: `GET /api/tx/{txid}/merkle-proof`"""
return self.get_json(f'/api/tx/{txid}/merkle-proof')
def get_tx_outspend(self, txid: Txid, vout: Vout) -> TxOutspend: def get_tx_outspend(self, txid: Txid, vout: Vout) -> TxOutspend:
"""Output spend status. """Output spend status.
@@ -7259,6 +7515,16 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/tx/{txid}/outspends`""" Endpoint: `GET /api/tx/{txid}/outspends`"""
return self.get_json(f'/api/tx/{txid}/outspends') return self.get_json(f'/api/tx/{txid}/outspends')
def get_tx_raw(self, txid: Txid) -> List[float]:
"""Transaction raw.
Returns a transaction as binary data.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*
Endpoint: `GET /api/tx/{txid}/raw`"""
return self.get_json(f'/api/tx/{txid}/raw')
def get_tx_status(self, txid: Txid) -> TxStatus: def get_tx_status(self, txid: Txid) -> TxStatus:
"""Transaction status. """Transaction status.
@@ -7269,6 +7535,46 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/tx/{txid}/status`""" Endpoint: `GET /api/tx/{txid}/status`"""
return self.get_json(f'/api/tx/{txid}/status') return self.get_json(f'/api/tx/{txid}/status')
def get_block_v1(self, hash: BlockHash) -> BlockInfoV1:
"""Block (v1).
Returns block details with extras by hash.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*
Endpoint: `GET /api/v1/block/{hash}`"""
return self.get_json(f'/api/v1/block/{hash}')
def get_blocks_v1(self) -> List[BlockInfoV1]:
"""Recent blocks with extras.
Retrieve the last 10 blocks with extended data including pool identification and fee statistics.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
Endpoint: `GET /api/v1/blocks`"""
return self.get_json('/api/v1/blocks')
def get_blocks_v1_from_height(self, height: Height) -> List[BlockInfoV1]:
"""Blocks from height with extras.
Retrieve up to 10 blocks with extended data going backwards from the given height.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
Endpoint: `GET /api/v1/blocks/{height}`"""
return self.get_json(f'/api/v1/blocks/{height}')
def get_cpfp(self, txid: Txid) -> CpfpInfo:
"""CPFP info.
Returns ancestors and descendants for a CPFP transaction.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*
Endpoint: `GET /api/v1/cpfp/{txid}`"""
return self.get_json(f'/api/v1/cpfp/{txid}')
def get_difficulty_adjustment(self) -> DifficultyAdjustment: def get_difficulty_adjustment(self) -> DifficultyAdjustment:
"""Difficulty adjustment. """Difficulty adjustment.
@@ -7289,6 +7595,16 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/v1/fees/mempool-blocks`""" Endpoint: `GET /api/v1/fees/mempool-blocks`"""
return self.get_json('/api/v1/fees/mempool-blocks') return self.get_json('/api/v1/fees/mempool-blocks')
def get_precise_fees(self) -> RecommendedFees:
"""Precise recommended fees.
Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*
Endpoint: `GET /api/v1/fees/precise`"""
return self.get_json('/api/v1/fees/precise')
def get_recommended_fees(self) -> RecommendedFees: def get_recommended_fees(self) -> RecommendedFees:
"""Recommended fees. """Recommended fees.
@@ -7299,6 +7615,20 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/v1/fees/recommended`""" Endpoint: `GET /api/v1/fees/recommended`"""
return self.get_json('/api/v1/fees/recommended') return self.get_json('/api/v1/fees/recommended')
def get_historical_price(self, timestamp: Optional[Timestamp] = None) -> HistoricalPrice:
"""Historical price.
Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*
Endpoint: `GET /api/v1/historical-price`"""
params = []
if timestamp is not None: params.append(f'timestamp={timestamp}')
query = '&'.join(params)
path = f'/api/v1/historical-price{"?" + query if query else ""}'
return self.get_json(path)
def get_block_fee_rates(self, time_period: TimePeriod) -> Any: def get_block_fee_rates(self, time_period: TimePeriod) -> Any:
"""Block fee rates (WIP). """Block fee rates (WIP).
@@ -7379,6 +7709,26 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/v1/mining/hashrate`""" Endpoint: `GET /api/v1/mining/hashrate`"""
return self.get_json('/api/v1/mining/hashrate') return self.get_json('/api/v1/mining/hashrate')
def get_pools_hashrate(self) -> List[PoolHashrateEntry]:
"""All pools hashrate (all time).
Get hashrate data for all mining pools.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
Endpoint: `GET /api/v1/mining/hashrate/pools`"""
return self.get_json('/api/v1/mining/hashrate/pools')
def get_pools_hashrate_by_period(self, time_period: TimePeriod) -> List[PoolHashrateEntry]:
"""All pools hashrate.
Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
Endpoint: `GET /api/v1/mining/hashrate/pools/{time_period}`"""
return self.get_json(f'/api/v1/mining/hashrate/pools/{time_period}')
def get_hashrate_by_period(self, time_period: TimePeriod) -> HashrateSummary: def get_hashrate_by_period(self, time_period: TimePeriod) -> HashrateSummary:
"""Network hashrate. """Network hashrate.
@@ -7399,6 +7749,36 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/v1/mining/pool/{slug}`""" Endpoint: `GET /api/v1/mining/pool/{slug}`"""
return self.get_json(f'/api/v1/mining/pool/{slug}') return self.get_json(f'/api/v1/mining/pool/{slug}')
def get_pool_blocks(self, slug: PoolSlug) -> List[BlockInfoV1]:
"""Mining pool blocks.
Get the 10 most recent blocks mined by a specific pool.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
Endpoint: `GET /api/v1/mining/pool/{slug}/blocks`"""
return self.get_json(f'/api/v1/mining/pool/{slug}/blocks')
def get_pool_blocks_from(self, slug: PoolSlug, height: Height) -> List[BlockInfoV1]:
"""Mining pool blocks from height.
Get 10 blocks mined by a specific pool before (and including) the given height.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
Endpoint: `GET /api/v1/mining/pool/{slug}/blocks/{height}`"""
return self.get_json(f'/api/v1/mining/pool/{slug}/blocks/{height}')
def get_pool_hashrate(self, slug: PoolSlug) -> List[PoolHashrateEntry]:
"""Mining pool hashrate.
Get hashrate history for a specific mining pool.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*
Endpoint: `GET /api/v1/mining/pool/{slug}/hashrate`"""
return self.get_json(f'/api/v1/mining/pool/{slug}/hashrate')
def get_pools(self) -> List[PoolInfo]: def get_pools(self) -> List[PoolInfo]:
"""List all mining pools. """List all mining pools.
@@ -7429,6 +7809,20 @@ class BrkClient(BrkClientBase):
Endpoint: `GET /api/v1/mining/reward-stats/{block_count}`""" Endpoint: `GET /api/v1/mining/reward-stats/{block_count}`"""
return self.get_json(f'/api/v1/mining/reward-stats/{block_count}') return self.get_json(f'/api/v1/mining/reward-stats/{block_count}')
def get_transaction_times(self, txId: List[Txid]) -> List[float]:
"""Transaction first-seen times.
Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.
*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*
Endpoint: `GET /api/v1/transaction-times`"""
params = []
params.append(f'txId[]={txId}')
query = '&'.join(params)
path = f'/api/v1/transaction-times{"?" + query if query else ""}'
return self.get_json(path)
def validate_address(self, address: str) -> AddrValidation: def validate_address(self, address: str) -> AddrValidation:
"""Validate address. """Validate address.

1
website/.gitignore vendored
View File

@@ -2,3 +2,4 @@
!scripts/**/_*.js !scripts/**/_*.js
*_old.js *_old.js
*dump* *dump*
TODO.md

View File

@@ -54,6 +54,7 @@
* @typedef {Brk._0sdM0M1M1sdM2M2sdM3sdP0P1P1sdP2P2sdP3sdSdZscorePattern} Ratio1ySdPattern * @typedef {Brk._0sdM0M1M1sdM2M2sdM3sdP0P1P1sdP2P2sdP3sdSdZscorePattern} Ratio1ySdPattern
* @typedef {Brk.Dollars} Dollars * @typedef {Brk.Dollars} Dollars
* @typedef {Brk.BlockInfo} BlockInfo * @typedef {Brk.BlockInfo} BlockInfo
* @typedef {Brk.BlockInfoV1} BlockInfoV1
* ActivePriceRatioPattern: ratio pattern with price (extended) * ActivePriceRatioPattern: ratio pattern with price (extended)
* @typedef {Brk.BpsPriceRatioPattern} ActivePriceRatioPattern * @typedef {Brk.BpsPriceRatioPattern} ActivePriceRatioPattern
* PriceRatioPercentilesPattern: price pattern with ratio + percentiles (no SMAs/stdDev) * PriceRatioPercentilesPattern: price pattern with ratio + percentiles (no SMAs/stdDev)

View File

@@ -50,7 +50,10 @@ export function init() {
} else { } else {
startPolling(); startPolling();
} }
}).observe(explorerElement, { attributes: true, attributeFilter: ["hidden"] }); }).observe(explorerElement, {
attributes: true,
attributeFilter: ["hidden"],
});
document.addEventListener("visibilitychange", () => { document.addEventListener("visibilitychange", () => {
if (!document.hidden && !explorerElement.hidden) { if (!document.hidden && !explorerElement.hidden) {
@@ -65,12 +68,12 @@ async function loadLatest() {
if (loading) return; if (loading) return;
loading = true; loading = true;
try { try {
const blocks = await brk.getBlocks(); const blocks = await brk.getBlocksV1();
// First load: insert all blocks before sentinel // First load: insert all blocks before sentinel
if (newestHeight === -1) { if (newestHeight === -1) {
for (const block of blocks) { for (const block of blocks) {
sentinel.before(createBlockCube(block)); sentinel.after(createBlockCube(block));
} }
newestHeight = blocks[0].height; newestHeight = blocks[0].height;
oldestHeight = blocks[blocks.length - 1].height; oldestHeight = blocks[blocks.length - 1].height;
@@ -78,7 +81,8 @@ async function loadLatest() {
// Subsequent polls: prepend only new blocks // Subsequent polls: prepend only new blocks
const newBlocks = blocks.filter((b) => b.height > newestHeight); const newBlocks = blocks.filter((b) => b.height > newestHeight);
if (newBlocks.length) { if (newBlocks.length) {
chain.prepend(...newBlocks.map((b) => createBlockCube(b))); // sentinel.after(createBlockCube(block));
sentinel.after(...newBlocks.map((b) => createBlockCube(b)));
newestHeight = newBlocks[0].height; newestHeight = newBlocks[0].height;
} }
} }
@@ -92,9 +96,9 @@ async function loadOlder() {
if (loading || oldestHeight <= 0) return; if (loading || oldestHeight <= 0) return;
loading = true; loading = true;
try { try {
const blocks = await brk.getBlocksFromHeight(oldestHeight - 1); const blocks = await brk.getBlocksV1FromHeight(oldestHeight - 1);
for (const block of blocks) { for (const block of blocks) {
sentinel.before(createBlockCube(block)); sentinel.after(createBlockCube(block));
} }
if (blocks.length) { if (blocks.length) {
oldestHeight = blocks[blocks.length - 1].height; oldestHeight = blocks[blocks.length - 1].height;
@@ -105,7 +109,7 @@ async function loadOlder() {
loading = false; loading = false;
} }
/** @param {BlockInfo} block */ /** @param {BlockInfoV1} block */
function createBlockCube(block) { function createBlockCube(block) {
const { cubeElement, leftFaceElement, rightFaceElement, topFaceElement } = const { cubeElement, leftFaceElement, rightFaceElement, topFaceElement } =
createCube(); createCube();
@@ -128,20 +132,23 @@ function createBlockCube(block) {
const feesElement = window.document.createElement("div"); const feesElement = window.document.createElement("div");
feesElement.classList.add("fees"); feesElement.classList.add("fees");
leftFaceElement.append(feesElement); leftFaceElement.append(feesElement);
const extras = block.extras;
const medianFee = extras ? extras.medianFee : 0;
const feeRange = extras ? extras.feeRange : [0, 0, 0, 0, 0, 0, 0];
const averageFeeElement = window.document.createElement("p"); const averageFeeElement = window.document.createElement("p");
feesElement.append(averageFeeElement); feesElement.append(averageFeeElement);
averageFeeElement.innerHTML = `~1.41`; averageFeeElement.innerHTML = `~${Number(medianFee).toFixed(2)}`;
const feeRangeElement = window.document.createElement("p"); const feeRangeElement = window.document.createElement("p");
feesElement.append(feeRangeElement); feesElement.append(feeRangeElement);
const minFeeElement = window.document.createElement("span"); const minFeeElement = window.document.createElement("span");
minFeeElement.innerHTML = `0.11`; minFeeElement.innerHTML = `${Number(feeRange[0]).toFixed(2)}`;
feeRangeElement.append(minFeeElement); feeRangeElement.append(minFeeElement);
const dashElement = window.document.createElement("span"); const dashElement = window.document.createElement("span");
dashElement.style.opacity = "0.5"; dashElement.style.opacity = "0.5";
dashElement.innerHTML = `-`; dashElement.innerHTML = `-`;
feeRangeElement.append(dashElement); feeRangeElement.append(dashElement);
const maxFeeElement = window.document.createElement("span"); const maxFeeElement = window.document.createElement("span");
maxFeeElement.innerHTML = `12.1`; maxFeeElement.innerHTML = `${Number(feeRange[6]).toFixed(1)}`;
feeRangeElement.append(maxFeeElement); feeRangeElement.append(maxFeeElement);
const feeUnitElement = window.document.createElement("p"); const feeUnitElement = window.document.createElement("p");
feesElement.append(feeUnitElement); feesElement.append(feeUnitElement);
@@ -149,7 +156,7 @@ function createBlockCube(block) {
feeUnitElement.innerHTML = `sat/vB`; feeUnitElement.innerHTML = `sat/vB`;
const spanMiner = window.document.createElement("span"); const spanMiner = window.document.createElement("span");
spanMiner.innerHTML = "TODO"; spanMiner.innerHTML = extras ? extras.pool.name : "Unknown";
topFaceElement.append(spanMiner); topFaceElement.append(spanMiner);
return cubeElement; return cubeElement;

View File

@@ -1,9 +1,11 @@
#explorer { #explorer {
width: 100%;
--cube: 4.5rem; --cube: 4.5rem;
#chain { #chain {
display: flex; display: flex;
flex-direction: column; flex-direction: column-reverse;
gap: calc(var(--cube) * 0.66); gap: calc(var(--cube) * 0.66);
padding: 2rem; padding: 2rem;