global: snapshot

This commit is contained in:
nym21
2026-03-31 22:53:25 +02:00
parent d038141a8a
commit ae26db6df2
83 changed files with 3398 additions and 710 deletions

View File

@@ -82,18 +82,19 @@ pub fn generate_api_methods(output: &mut String, endpoints: &[Endpoint]) {
} else {
writeln!(output, " const params = new URLSearchParams();").unwrap();
for param in &endpoint.query_params {
let ident = sanitize_ident(&param.name);
if param.required {
writeln!(
output,
" params.set('{}', String({}));",
param.name, param.name
param.name, ident
)
.unwrap();
} else {
writeln!(
output,
" if ({} !== undefined) params.set('{}', String({}));",
param.name, param.name, param.name
ident, param.name, ident
)
.unwrap();
}
@@ -127,14 +128,19 @@ fn endpoint_to_method_name(endpoint: &Endpoint) -> String {
fn build_method_params(endpoint: &Endpoint) -> String {
let mut params = Vec::new();
for param in &endpoint.path_params {
params.push(param.name.clone());
params.push(sanitize_ident(&param.name));
}
for param in &endpoint.query_params {
params.push(param.name.clone());
params.push(sanitize_ident(&param.name));
}
params.join(", ")
}
/// Strip characters invalid in JS identifiers (e.g. `[]` from `txId[]`).
fn sanitize_ident(name: &str) -> String {
name.replace(['[', ']'], "")
}
fn build_path_template(path: &str, path_params: &[Parameter]) -> String {
let mut result = path.to_string();
for param in path_params {

View File

@@ -143,18 +143,19 @@ pub fn generate_api_methods(output: &mut String, endpoints: &[Endpoint]) {
} else {
writeln!(output, " let mut query = Vec::new();").unwrap();
for param in &endpoint.query_params {
let ident = sanitize_ident(&param.name);
if param.required {
writeln!(
output,
" query.push(format!(\"{}={{}}\", {}));",
param.name, param.name
param.name, ident
)
.unwrap();
} else {
writeln!(
output,
" if let Some(v) = {} {{ query.push(format!(\"{}={{}}\", v)); }}",
param.name, param.name
ident, param.name
)
.unwrap();
}
@@ -198,26 +199,35 @@ fn build_method_params(endpoint: &Endpoint) -> String {
let mut params = Vec::new();
for param in &endpoint.path_params {
let rust_type = param_type_to_rust(&param.param_type);
params.push(format!(", {}: {}", param.name, rust_type));
params.push(format!(", {}: {}", sanitize_ident(&param.name), rust_type));
}
for param in &endpoint.query_params {
let rust_type = param_type_to_rust(&param.param_type);
let name = sanitize_ident(&param.name);
if param.required {
params.push(format!(", {}: {}", param.name, rust_type));
params.push(format!(", {}: {}", name, rust_type));
} else {
params.push(format!(", {}: Option<{}>", param.name, rust_type));
params.push(format!(", {}: Option<{}>", name, rust_type));
}
}
params.join("")
}
/// Strip characters invalid in Rust identifiers (e.g. `[]` from `txId[]`).
fn sanitize_ident(name: &str) -> String {
name.replace(['[', ']'], "")
}
/// Convert parameter type to Rust type for function signatures.
fn param_type_to_rust(param_type: &str) -> String {
if let Some(inner) = param_type.strip_suffix("[]") {
return format!("&[{}]", param_type_to_rust(inner));
}
match param_type {
"string" | "*" => "&str".to_string(),
"integer" | "number" => "i64".to_string(),
"boolean" => "bool".to_string(),
other => other.to_string(), // Domain types like Index, SeriesName, Format
other => other.to_string(),
}
}

View File

@@ -74,6 +74,9 @@ pub fn escape_python_keyword(name: &str) -> String {
"try", "while", "with", "yield",
];
// Strip characters invalid in identifiers (e.g. `[]` from `txId[]`)
let name = name.replace(['[', ']'], "");
// Prefix with underscore if starts with digit
let name = if name.starts_with(|c: char| c.is_ascii_digit()) {
format!("_{}", name)

View File

@@ -13,7 +13,6 @@ brk_alloc = { workspace = true }
brk_computer = { workspace = true }
brk_error = { workspace = true, features = ["tokio", "vecdb"] }
brk_indexer = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true }
brk_mempool = { workspace = true }
brk_query = { workspace = true }

View File

@@ -10,7 +10,6 @@ use brk_alloc::Mimalloc;
use brk_computer::Computer;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_mempool::Mempool;
use brk_query::AsyncQuery;
use brk_reader::Reader;
@@ -37,8 +36,6 @@ pub fn main() -> anyhow::Result<()> {
let reader = Reader::new(config.blocksdir(), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&config.brkdir())?;
#[cfg(not(debug_assertions))]
@@ -52,7 +49,7 @@ pub fn main() -> anyhow::Result<()> {
info!("Indexing {blocks_behind} blocks before starting server...");
info!("---");
sleep(Duration::from_secs(10));
indexer.index(&blocks, &client, &exit)?;
indexer.index(&reader, &client, &exit)?;
drop(indexer);
Mimalloc::collect();
indexer = Indexer::forced_import(&config.brkdir())?;
@@ -102,14 +99,14 @@ pub fn main() -> anyhow::Result<()> {
let total_start = Instant::now();
let starting_indexes = if cfg!(debug_assertions) {
indexer.checked_index(&blocks, &client, &exit)?
indexer.checked_index(&reader, &client, &exit)?
} else {
indexer.index(&blocks, &client, &exit)?
indexer.index(&reader, &client, &exit)?
};
Mimalloc::collect();
computer.compute(&indexer, starting_indexes, &reader, &exit)?;
computer.compute(&indexer, starting_indexes, &exit)?;
info!("Total time: {:?}", total_start.elapsed());
info!("Waiting for new blocks...");

View File

@@ -13,6 +13,7 @@ use serde::de::DeserializeOwned;
pub use brk_cohort::*;
pub use brk_types::*;
/// Error type for BRK client operations.
#[derive(Debug)]
pub struct BrkError {
@@ -3124,7 +3125,6 @@ pub struct SeriesTree {
pub addrs: SeriesTree_Addrs,
pub scripts: SeriesTree_Scripts,
pub mining: SeriesTree_Mining,
pub positions: SeriesTree_Positions,
pub cointime: SeriesTree_Cointime,
pub constants: SeriesTree_Constants,
pub indexes: SeriesTree_Indexes,
@@ -3147,7 +3147,6 @@ impl SeriesTree {
addrs: SeriesTree_Addrs::new(client.clone(), format!("{base_path}_addrs")),
scripts: SeriesTree_Scripts::new(client.clone(), format!("{base_path}_scripts")),
mining: SeriesTree_Mining::new(client.clone(), format!("{base_path}_mining")),
positions: SeriesTree_Positions::new(client.clone(), format!("{base_path}_positions")),
cointime: SeriesTree_Cointime::new(client.clone(), format!("{base_path}_cointime")),
constants: SeriesTree_Constants::new(client.clone(), format!("{base_path}_constants")),
indexes: SeriesTree_Indexes::new(client.clone(), format!("{base_path}_indexes")),
@@ -3165,10 +3164,14 @@ impl SeriesTree {
/// Series tree node.
pub struct SeriesTree_Blocks {
pub blockhash: SeriesPattern18<BlockHash>,
pub coinbase_tag: SeriesPattern18<CoinbaseTag>,
pub difficulty: SeriesTree_Blocks_Difficulty,
pub time: SeriesTree_Blocks_Time,
pub size: SeriesTree_Blocks_Size,
pub weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern<Weight>,
pub segwit_txs: SeriesPattern18<StoredU32>,
pub segwit_size: SeriesPattern18<StoredU64>,
pub segwit_weight: SeriesPattern18<Weight>,
pub count: SeriesTree_Blocks_Count,
pub lookback: SeriesTree_Blocks_Lookback,
pub interval: SeriesTree_Blocks_Interval,
@@ -3181,10 +3184,14 @@ impl SeriesTree_Blocks {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
blockhash: SeriesPattern18::new(client.clone(), "blockhash".to_string()),
coinbase_tag: SeriesPattern18::new(client.clone(), "coinbase_tag".to_string()),
difficulty: SeriesTree_Blocks_Difficulty::new(client.clone(), format!("{base_path}_difficulty")),
time: SeriesTree_Blocks_Time::new(client.clone(), format!("{base_path}_time")),
size: SeriesTree_Blocks_Size::new(client.clone(), format!("{base_path}_size")),
weight: AverageBaseCumulativeMaxMedianMinPct10Pct25Pct75Pct90SumPattern::new(client.clone(), "block_weight".to_string()),
segwit_txs: SeriesPattern18::new(client.clone(), "segwit_txs".to_string()),
segwit_size: SeriesPattern18::new(client.clone(), "segwit_size".to_string()),
segwit_weight: SeriesPattern18::new(client.clone(), "segwit_weight".to_string()),
count: SeriesTree_Blocks_Count::new(client.clone(), format!("{base_path}_count")),
lookback: SeriesTree_Blocks_Lookback::new(client.clone(), format!("{base_path}_lookback")),
interval: SeriesTree_Blocks_Interval::new(client.clone(), format!("{base_path}_interval")),
@@ -3538,6 +3545,7 @@ pub struct SeriesTree_Transactions_Fees {
pub output_value: SeriesPattern19<Sats>,
pub fee: _6bBlockTxPattern<Sats>,
pub fee_rate: _6bBlockTxPattern<FeeRate>,
pub effective_fee_rate: _6bBlockTxPattern<FeeRate>,
}
impl SeriesTree_Transactions_Fees {
@@ -3547,6 +3555,7 @@ impl SeriesTree_Transactions_Fees {
output_value: SeriesPattern19::new(client.clone(), "output_value".to_string()),
fee: _6bBlockTxPattern::new(client.clone(), "fee".to_string()),
fee_rate: _6bBlockTxPattern::new(client.clone(), "fee_rate".to_string()),
effective_fee_rate: _6bBlockTxPattern::new(client.clone(), "effective_fee_rate".to_string()),
}
}
}
@@ -4179,6 +4188,7 @@ pub struct SeriesTree_Mining_Rewards {
pub coinbase: AverageBlockCumulativeSumPattern3,
pub subsidy: SeriesTree_Mining_Rewards_Subsidy,
pub fees: SeriesTree_Mining_Rewards_Fees,
pub output_volume: SeriesPattern18<Sats>,
pub unclaimed: BlockCumulativePattern,
}
@@ -4188,6 +4198,7 @@ impl SeriesTree_Mining_Rewards {
coinbase: AverageBlockCumulativeSumPattern3::new(client.clone(), "coinbase".to_string()),
subsidy: SeriesTree_Mining_Rewards_Subsidy::new(client.clone(), format!("{base_path}_subsidy")),
fees: SeriesTree_Mining_Rewards_Fees::new(client.clone(), format!("{base_path}_fees")),
output_volume: SeriesPattern18::new(client.clone(), "output_volume".to_string()),
unclaimed: BlockCumulativePattern::new(client.clone(), "unclaimed_rewards".to_string()),
}
}
@@ -4325,17 +4336,6 @@ impl SeriesTree_Mining_Hashrate_Rate_Sma {
}
}
/// Series tree node.
pub struct SeriesTree_Positions {
}
impl SeriesTree_Positions {
pub fn new(client: Arc<BrkClientBase>, base_path: String) -> Self {
Self {
}
}
}
/// Series tree node.
pub struct SeriesTree_Cointime {
pub activity: SeriesTree_Cointime_Activity,
@@ -8320,14 +8320,14 @@ impl BrkClient {
self.base.get_json(&format!("/api/address/{address}/utxo"))
}
/// Block by height
/// Block hash by height
///
/// Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count.
/// Retrieve the block hash at a given height. Returns the hash as plain text.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*
///
/// Endpoint: `GET /api/block-height/{height}`
pub fn get_block_by_height(&self, height: Height) -> Result<BlockInfo> {
pub fn get_block_by_height(&self, height: Height) -> Result<BlockHash> {
self.base.get_json(&format!("/api/block-height/{height}"))
}
@@ -8342,6 +8342,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/block/{hash}"))
}
/// Block header
///
/// Returns the hex-encoded block header.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*
///
/// Endpoint: `GET /api/block/{hash}/header`
pub fn get_block_header(&self, hash: BlockHash) -> Result<Hex> {
self.base.get_json(&format!("/api/block/{hash}/header"))
}
/// Raw block
///
/// Returns the raw block data in binary format.
@@ -8408,6 +8419,28 @@ impl BrkClient {
self.base.get_json(&format!("/api/blocks"))
}
/// Block tip hash
///
/// Returns the hash of the last block.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*
///
/// Endpoint: `GET /api/blocks/tip/hash`
pub fn get_block_tip_hash(&self) -> Result<BlockHash> {
self.base.get_json(&format!("/api/blocks/tip/hash"))
}
/// Block tip height
///
/// Returns the height of the last block.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*
///
/// Endpoint: `GET /api/blocks/tip/height`
pub fn get_block_tip_height(&self) -> Result<Height> {
self.base.get_json(&format!("/api/blocks/tip/height"))
}
/// Blocks from height
///
/// Retrieve up to 10 blocks going backwards from the given height. For example, height=100 returns blocks 100, 99, 98, ..., 91. Height=0 returns only block 0.
@@ -8425,9 +8458,9 @@ impl BrkClient {
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool)*
///
/// Endpoint: `GET /api/mempool/info`
/// Endpoint: `GET /api/mempool`
pub fn get_mempool(&self) -> Result<MempoolInfo> {
self.base.get_json(&format!("/api/mempool/info"))
self.base.get_json(&format!("/api/mempool"))
}
/// Live BTC/USD price
@@ -8439,6 +8472,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/mempool/price"))
}
/// Recent mempool transactions
///
/// Get the last 10 transactions to enter the mempool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*
///
/// Endpoint: `GET /api/mempool/recent`
pub fn get_mempool_recent(&self) -> Result<Vec<MempoolRecentTx>> {
self.base.get_json(&format!("/api/mempool/recent"))
}
/// Mempool transaction IDs
///
/// Get all transaction IDs currently in the mempool.
@@ -8679,6 +8723,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/hex"))
}
/// Transaction merkle proof
///
/// Get the merkle inclusion proof for a transaction.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*
///
/// Endpoint: `GET /api/tx/{txid}/merkle-proof`
pub fn get_tx_merkle_proof(&self, txid: Txid) -> Result<MerkleProof> {
self.base.get_json(&format!("/api/tx/{txid}/merkle-proof"))
}
/// Output spend status
///
/// Get the spending status of a transaction output. Returns whether the output has been spent and, if so, the spending transaction details.
@@ -8701,6 +8756,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/outspends"))
}
/// Transaction raw
///
/// Returns a transaction as binary data.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*
///
/// Endpoint: `GET /api/tx/{txid}/raw`
pub fn get_tx_raw(&self, txid: Txid) -> Result<Vec<f64>> {
self.base.get_json(&format!("/api/tx/{txid}/raw"))
}
/// Transaction status
///
/// Retrieve the confirmation status of a transaction. Returns whether the transaction is confirmed and, if so, the block height, hash, and timestamp.
@@ -8712,6 +8778,50 @@ impl BrkClient {
self.base.get_json(&format!("/api/tx/{txid}/status"))
}
/// Block (v1)
///
/// Returns block details with extras by hash.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*
///
/// Endpoint: `GET /api/v1/block/{hash}`
pub fn get_block_v1(&self, hash: BlockHash) -> Result<BlockInfoV1> {
self.base.get_json(&format!("/api/v1/block/{hash}"))
}
/// Recent blocks with extras
///
/// Retrieve the last 10 blocks with extended data including pool identification and fee statistics.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
///
/// Endpoint: `GET /api/v1/blocks`
pub fn get_blocks_v1(&self) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/blocks"))
}
/// Blocks from height with extras
///
/// Retrieve up to 10 blocks with extended data going backwards from the given height.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*
///
/// Endpoint: `GET /api/v1/blocks/{height}`
pub fn get_blocks_v1_from_height(&self, height: Height) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/blocks/{height}"))
}
/// CPFP info
///
/// Returns ancestors and descendants for a CPFP transaction.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*
///
/// Endpoint: `GET /api/v1/cpfp/{txid}`
pub fn get_cpfp(&self, txid: Txid) -> Result<CpfpInfo> {
self.base.get_json(&format!("/api/v1/cpfp/{txid}"))
}
/// Difficulty adjustment
///
/// Get current difficulty adjustment information including progress through the current epoch, estimated retarget date, and difficulty change prediction.
@@ -8734,6 +8844,17 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/fees/mempool-blocks"))
}
/// Precise recommended fees
///
/// Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*
///
/// Endpoint: `GET /api/v1/fees/precise`
pub fn get_precise_fees(&self) -> Result<RecommendedFees> {
self.base.get_json(&format!("/api/v1/fees/precise"))
}
/// Recommended fees
///
/// Get recommended fee rates for different confirmation targets based on current mempool state.
@@ -8745,6 +8866,21 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/fees/recommended"))
}
/// Historical price
///
/// Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*
///
/// Endpoint: `GET /api/v1/historical-price`
pub fn get_historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
let mut query = Vec::new();
if let Some(v) = timestamp { query.push(format!("timestamp={}", v)); }
let query_str = if query.is_empty() { String::new() } else { format!("?{}", query.join("&")) };
let path = format!("/api/v1/historical-price{}", query_str);
self.base.get_json(&path)
}
/// Block fee rates (WIP)
///
/// **Work in progress.** Get block fee rate percentiles (min, 10th, 25th, median, 75th, 90th, max) for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y
@@ -8833,6 +8969,28 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/hashrate"))
}
/// All pools hashrate (all time)
///
/// Get hashrate data for all mining pools.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
///
/// Endpoint: `GET /api/v1/mining/hashrate/pools`
pub fn get_pools_hashrate(&self) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/hashrate/pools"))
}
/// All pools hashrate
///
/// Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*
///
/// Endpoint: `GET /api/v1/mining/hashrate/pools/{time_period}`
pub fn get_pools_hashrate_by_period(&self, time_period: TimePeriod) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/hashrate/pools/{time_period}"))
}
/// Network hashrate
///
/// Get network hashrate and difficulty data for a time period. Valid periods: 24h, 3d, 1w, 1m, 3m, 6m, 1y, 2y, 3y
@@ -8855,6 +9013,39 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}"))
}
/// Mining pool blocks
///
/// Get the 10 most recent blocks mined by a specific pool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/blocks`
pub fn get_pool_blocks(&self, slug: PoolSlug) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/blocks"))
}
/// Mining pool blocks from height
///
/// Get 10 blocks mined by a specific pool before (and including) the given height.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/blocks/{height}`
pub fn get_pool_blocks_from(&self, slug: PoolSlug, height: Height) -> Result<Vec<BlockInfoV1>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/blocks/{height}"))
}
/// Mining pool hashrate
///
/// Get hashrate history for a specific mining pool.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*
///
/// Endpoint: `GET /api/v1/mining/pool/{slug}/hashrate`
pub fn get_pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
self.base.get_json(&format!("/api/v1/mining/pool/{slug}/hashrate"))
}
/// List all mining pools
///
/// Get list of all known mining pools with their identifiers.
@@ -8888,6 +9079,21 @@ impl BrkClient {
self.base.get_json(&format!("/api/v1/mining/reward-stats/{block_count}"))
}
/// Transaction first-seen times
///
/// Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.
///
/// *[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*
///
/// Endpoint: `GET /api/v1/transaction-times`
pub fn get_transaction_times(&self, txId: Txid[]) -> Result<Vec<f64>> {
let mut query = Vec::new();
query.push(format!("txId[]={}", txId));
let query_str = if query.is_empty() { String::new() } else { format!("?{}", query.join("&")) };
let path = format!("/api/v1/transaction-times{}", query_str);
self.base.get_json(&path)
}
/// Validate address
///
/// Validate a Bitcoin address and get information about its type and scriptPubKey.

View File

@@ -14,3 +14,6 @@ brk_traversable = { workspace = true }
vecdb = { workspace = true }
rayon = { workspace = true }
serde = { workspace = true }
[package.metadata.cargo-machete]
ignored = ["vecdb"]

View File

@@ -14,11 +14,8 @@ brk_error = { workspace = true, features = ["vecdb"] }
brk_cohort = { workspace = true }
brk_indexer = { workspace = true }
brk_oracle = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true }
brk_reader = { workspace = true }
brk_rpc = { workspace = true, features = ["corepc"] }
brk_store = { workspace = true }
brk_traversable = { workspace = true }
brk_types = { workspace = true }
derive_more = { workspace = true }
@@ -33,6 +30,7 @@ smallvec = { workspace = true }
vecdb = { workspace = true }
[dev-dependencies]
brk_reader = { workspace = true }
brk_alloc = { workspace = true }
brk_bencher = { workspace = true }
color-eyre = { workspace = true }

View File

@@ -8,7 +8,6 @@ use std::{
use brk_alloc::Mimalloc;
use brk_computer::Computer;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use vecdb::Exit;
@@ -31,8 +30,6 @@ pub fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?;
let exit = Exit::new();
@@ -42,7 +39,7 @@ pub fn main() -> color_eyre::Result<()> {
let chain_height = client.get_last_height()?;
let indexed_height = indexer.vecs.starting_height();
if u32::from(chain_height).saturating_sub(u32::from(indexed_height)) > 1000 {
indexer.checked_index(&blocks, &client, &exit)?;
indexer.checked_index(&reader, &client, &exit)?;
drop(indexer);
Mimalloc::collect();
indexer = Indexer::forced_import(&outputs_dir)?;
@@ -52,11 +49,11 @@ pub fn main() -> color_eyre::Result<()> {
loop {
let i = Instant::now();
let starting_indexes = indexer.checked_index(&blocks, &client, &exit)?;
let starting_indexes = indexer.checked_index(&reader, &client, &exit)?;
Mimalloc::collect();
computer.compute(&indexer, starting_indexes, &reader, &exit)?;
computer.compute(&indexer, starting_indexes, &exit)?;
dbg!(i.elapsed());
sleep(Duration::from_secs(10));
}

View File

@@ -5,7 +5,6 @@ use brk_bencher::Bencher;
use brk_computer::Computer;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use tracing::{debug, info};
@@ -28,8 +27,6 @@ pub fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut computer = Computer::forced_import(&outputs_benches_dir, &indexer)?;
@@ -47,13 +44,13 @@ pub fn main() -> Result<()> {
});
let i = Instant::now();
let starting_indexes = indexer.index(&blocks, &client, &exit)?;
let starting_indexes = indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed());
Mimalloc::collect();
let i = Instant::now();
computer.compute(&indexer, starting_indexes, &reader, &exit)?;
computer.compute(&indexer, starting_indexes, &exit)?;
info!("Done in {:?}", i.elapsed());
// We want to benchmark the drop too

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher;
use brk_computer::Computer;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use tracing::{debug, info};
@@ -45,15 +44,13 @@ pub fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?;
// Pre-run indexer if too far behind, then drop and reimport to reduce memory
let chain_height = client.get_last_height()?;
let indexed_height = indexer.vecs.starting_height();
if chain_height.saturating_sub(*indexed_height) > 1000 {
indexer.index(&blocks, &client, &exit)?;
indexer.index(&reader, &client, &exit)?;
drop(indexer);
Mimalloc::collect();
indexer = Indexer::forced_import(&outputs_dir)?;
@@ -63,13 +60,13 @@ pub fn main() -> color_eyre::Result<()> {
loop {
let i = Instant::now();
let starting_indexes = indexer.index(&blocks, &client, &exit)?;
let starting_indexes = indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed());
Mimalloc::collect();
let i = Instant::now();
computer.compute(&indexer, starting_indexes, &reader, &exit)?;
computer.compute(&indexer, starting_indexes, &exit)?;
info!("Done in {:?}", i.elapsed());
sleep(Duration::from_secs(60));

View File

@@ -7,7 +7,7 @@ use brk_types::{
use rayon::prelude::*;
use rustc_hash::FxHashSet;
use tracing::{debug, info};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unlikely};
use crate::{
distribution::{
@@ -243,7 +243,11 @@ pub(crate) fn process_blocks(
for height in starting_height.to_usize()..=last_height.to_usize() {
let height = Height::from(height);
info!("Processing chain at {}...", height);
if unlikely(height.is_multiple_of(100)) {
info!("Processing chain at {}...", height);
} else {
debug!("Processing chain at {}...", height);
}
// Get block metadata from pre-collected vecs
let offset = height.to_usize() - start_usize;

View File

@@ -4,7 +4,6 @@ use std::{fs, path::Path, thread, time::Instant};
use brk_error::Result;
use brk_indexer::Indexer;
use brk_reader::Reader;
use brk_traversable::Traversable;
use brk_types::Version;
use tracing::info;
@@ -23,7 +22,6 @@ mod market;
mod mining;
mod outputs;
mod pools;
mod positions;
pub mod prices;
mod scripts;
mod supply;
@@ -35,7 +33,6 @@ pub struct Computer<M: StorageMode = Rw> {
pub mining: Box<mining::Vecs<M>>,
pub transactions: Box<transactions::Vecs<M>>,
pub scripts: Box<scripts::Vecs<M>>,
pub positions: Box<positions::Vecs<M>>,
pub cointime: Box<cointime::Vecs<M>>,
pub constants: Box<constants::Vecs>,
pub indexes: Box<indexes::Vecs<M>>,
@@ -63,24 +60,12 @@ impl Computer {
const STACK_SIZE: usize = 8 * 1024 * 1024;
let big_thread = || thread::Builder::new().stack_size(STACK_SIZE);
let (indexes, positions) = timed("Imported indexes/positions", || {
thread::scope(|s| -> Result<_> {
let positions_handle = big_thread().spawn_scoped(s, || -> Result<_> {
Ok(Box::new(positions::Vecs::forced_import(
&computed_path,
VERSION,
)?))
})?;
let indexes = Box::new(indexes::Vecs::forced_import(
&computed_path,
VERSION,
indexer,
)?);
let positions = positions_handle.join().unwrap()?;
Ok((indexes, positions))
})
let indexes = timed("Imported indexes", || -> Result<_> {
Ok(Box::new(indexes::Vecs::forced_import(
&computed_path,
VERSION,
indexer,
)?))
})?;
let (constants, prices) = timed("Imported prices/constants", || -> Result<_> {
@@ -257,7 +242,6 @@ impl Computer {
market,
distribution,
supply,
positions,
pools,
cointime,
indexes,
@@ -278,7 +262,6 @@ impl Computer {
mining::DB_NAME,
transactions::DB_NAME,
scripts::DB_NAME,
positions::DB_NAME,
cointime::DB_NAME,
indicators::DB_NAME,
indexes::DB_NAME,
@@ -319,7 +302,6 @@ impl Computer {
&mut self,
indexer: &Indexer,
starting_indexes: brk_indexer::Indexes,
reader: &Reader,
exit: &Exit,
) -> Result<()> {
internal::cache_clear_all();
@@ -387,13 +369,6 @@ impl Computer {
)
})?;
let positions = scope.spawn(|| {
timed("Computed positions", || {
self.positions
.compute(indexer, &starting_indexes, reader, exit)
})
});
timed("Computed transactions", || {
self.transactions.compute(
indexer,
@@ -419,7 +394,6 @@ impl Computer {
)
})?;
positions.join().unwrap()?;
market.join().unwrap()?;
Ok(())
})?;
@@ -561,7 +535,6 @@ impl_iter_named!(
mining,
transactions,
scripts,
positions,
cointime,
constants,
indicators,

View File

@@ -25,7 +25,7 @@ impl Vecs {
indexer,
indexes,
&blocks.lookback,
&transactions.fees,
transactions,
prices,
starting_indexes,
exit,

View File

@@ -17,7 +17,7 @@ impl Vecs {
indexer: &Indexer,
indexes: &indexes::Vecs,
lookback: &blocks::LookbackVecs,
transactions_fees: &transactions::FeesVecs,
transactions: &transactions::Vecs,
prices: &prices::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
@@ -67,7 +67,7 @@ impl Vecs {
starting_indexes.height,
&indexer.vecs.transactions.first_tx_index,
&indexes.height.tx_index_count,
&transactions_fees.fee.tx_index,
&transactions.fees.fee.tx_index,
exit,
)?;
Ok(())
@@ -95,6 +95,13 @@ impl Vecs {
self.subsidy
.compute_rest(starting_indexes.height, prices, exit)?;
self.output_volume.compute_subtract(
starting_indexes.height,
&transactions.volume.transfer_volume.block.sats,
&self.fees.block.sats,
exit,
)?;
self.unclaimed.block.sats.compute_transform(
starting_indexes.height,
&self.subsidy.block.sats,

View File

@@ -1,6 +1,6 @@
use brk_error::Result;
use brk_types::Version;
use vecdb::Database;
use vecdb::{Database, EagerVec, ImportableVec};
use super::Vecs;
use crate::{
@@ -44,6 +44,7 @@ impl Vecs {
cached_starts,
)?,
fees: AmountPerBlockFull::forced_import(db, "fees", version, indexes, cached_starts)?,
output_volume: EagerVec::forced_import(db, "output_volume", version)?,
unclaimed: AmountPerBlockCumulative::forced_import(
db,
"unclaimed_rewards",

View File

@@ -1,6 +1,6 @@
use brk_traversable::Traversable;
use brk_types::{BasisPoints16, BasisPoints32};
use vecdb::{Rw, StorageMode};
use brk_types::{BasisPoints16, BasisPoints32, Height, Sats};
use vecdb::{EagerVec, PcoVec, Rw, StorageMode};
use crate::internal::{
AmountPerBlockCumulative, AmountPerBlockCumulativeRolling, AmountPerBlockFull,
@@ -12,6 +12,7 @@ pub struct Vecs<M: StorageMode = Rw> {
pub coinbase: AmountPerBlockCumulativeRolling<M>,
pub subsidy: AmountPerBlockCumulativeRolling<M>,
pub fees: AmountPerBlockFull<M>,
pub output_volume: M::Stored<EagerVec<PcoVec<Height, Sats>>>,
pub unclaimed: AmountPerBlockCumulative<M>,
#[traversable(wrap = "fees", rename = "dominance")]
pub fee_dominance: PercentPerBlock<BasisPoints16, M>,

View File

@@ -2,7 +2,6 @@ use std::{collections::BTreeMap, path::Path};
use brk_error::Result;
use brk_indexer::Indexer;
use brk_store::AnyStore;
use brk_traversable::Traversable;
use brk_types::{Addr, AddrBytes, Height, Indexes, OutputType, PoolSlug, Pools, TxOutIndex, pools};
use rayon::prelude::*;
@@ -114,8 +113,18 @@ impl Vecs {
starting_indexes: &Indexes,
exit: &Exit,
) -> Result<()> {
let dep_version = indexer.vecs.blocks.coinbase_tag.version();
let pool_vec_version = self.pool.header().vec_version();
let pool_computed = self.pool.header().computed_version();
let expected = pool_vec_version + dep_version;
if expected != pool_computed {
tracing::warn!(
"Pool version mismatch: vec_version={pool_vec_version:?} + dep={dep_version:?} = {expected:?}, stored computed={pool_computed:?}, len={}",
self.pool.len()
);
}
self.pool
.validate_computed_version_or_reset(indexer.stores.height_to_coinbase_tag.version())?;
.validate_computed_version_or_reset(dep_version)?;
let first_txout_index = indexer.vecs.transactions.first_txout_index.reader();
let output_type = indexer.vecs.outputs.output_type.reader();
@@ -142,12 +151,12 @@ impl Vecs {
self.pool.truncate_if_needed_at(min)?;
indexer
.stores
.height_to_coinbase_tag
.iter()
.skip(min)
.try_for_each(|(_, coinbase_tag)| -> Result<()> {
let len = indexer.vecs.blocks.coinbase_tag.len();
indexer.vecs.blocks.coinbase_tag.try_for_each_range_at(
min,
len,
|coinbase_tag| -> Result<()> {
let tx_index = first_tx_index_cursor.next().unwrap();
let out_start = first_txout_index.get(tx_index.to_usize());
@@ -174,12 +183,13 @@ impl Vecs {
.map(|bytes| Addr::try_from(&bytes).unwrap())
.and_then(|addr| self.pools.find_from_addr(&addr))
})
.or_else(|| self.pools.find_from_coinbase_tag(&coinbase_tag))
.or_else(|| self.pools.find_from_coinbase_tag(&coinbase_tag.as_str()))
.unwrap_or(unknown);
self.pool.push(pool.slug);
Ok(())
})?;
},
)?;
let _lock = exit.lock();
self.pool.write()?;

View File

@@ -1,147 +0,0 @@
use std::{fs, path::Path};
use brk_error::Result;
use brk_indexer::Indexer;
use brk_reader::{Reader, XOR_LEN, XORBytes};
use brk_traversable::Traversable;
use brk_types::{BlkPosition, Height, Indexes, TxIndex, Version};
use tracing::info;
use vecdb::{
AnyStoredVec, AnyVec, Database, Exit, ImportableVec, PcoVec, ReadableVec, Rw, StorageMode,
WritableVec,
};
use crate::internal::db_utils::{finalize_db, open_db};
pub const DB_NAME: &str = "positions";
#[derive(Traversable)]
#[traversable(hidden)]
pub struct Vecs<M: StorageMode = Rw> {
db: Database,
pub block: M::Stored<PcoVec<Height, BlkPosition>>,
pub tx: M::Stored<PcoVec<TxIndex, BlkPosition>>,
}
impl Vecs {
pub(crate) fn forced_import(parent_path: &Path, parent_version: Version) -> Result<Self> {
let db = open_db(parent_path, DB_NAME, 1_000_000)?;
let version = parent_version;
let this = Self {
block: PcoVec::forced_import(&db, "position", version + Version::TWO)?,
tx: PcoVec::forced_import(&db, "position", version + Version::TWO)?,
db,
};
finalize_db(&this.db, &this)?;
Ok(this)
}
pub(crate) fn compute(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
reader: &Reader,
exit: &Exit,
) -> Result<()> {
self.db.sync_bg_tasks()?;
self.compute_(indexer, starting_indexes, reader, exit)?;
let exit = exit.clone();
self.db.run_bg(move |db| {
let _lock = exit.lock();
db.compact_deferred_default()
});
Ok(())
}
/// Detects a change of the reader's XOR bytes and resets the position vecs
/// when it happens.
///
/// The current XOR bytes are compared against a copy cached in `xor.dat`
/// inside this database's directory:
/// - cache matches        -> nothing to do;
/// - cache differs        -> stored positions are stale, reset `block`/`tx`;
/// - no cache (first run) -> nothing to reset.
/// In the last two cases the current bytes are (re)written to `xor.dat`.
///
/// NOTE(review): presumably the XOR bytes are the key Bitcoin Core uses to
/// obfuscate blk files, which would invalidate recorded positions on change —
/// confirm against `brk_reader`.
fn check_xor_bytes(&mut self, reader: &Reader) -> Result<()> {
    let xor_path = self.db.path().join("xor.dat");
    let current = reader.xor_bytes();
    // A cache read that fails, or has the wrong length, is treated as "no cache".
    let cached = fs::read(&xor_path)
        .ok()
        .and_then(|b| <[u8; XOR_LEN]>::try_from(b).ok())
        .map(XORBytes::from);
    match cached {
        // Unchanged: skip the cache rewrite below.
        Some(c) if c == current => return Ok(()),
        Some(_) => {
            info!("XOR bytes changed, resetting positions...");
            self.block.reset()?;
            self.tx.reset()?;
        }
        None => {}
    }
    fs::write(&xor_path, *current)?;
    Ok(())
}
fn compute_(
&mut self,
indexer: &Indexer,
starting_indexes: &Indexes,
parser: &Reader,
exit: &Exit,
) -> Result<()> {
self.check_xor_bytes(parser)?;
// Validate computed versions against dependencies
let dep_version = indexer.vecs.transactions.first_tx_index.version()
+ indexer.vecs.transactions.height.version();
self.block.validate_computed_version_or_reset(dep_version)?;
self.tx.validate_computed_version_or_reset(dep_version)?;
let min_tx_index = TxIndex::from(self.tx.len()).min(starting_indexes.tx_index);
let Some(min_height) = indexer
.vecs
.transactions
.height
.collect_one(min_tx_index)
.map(|h: Height| h.min(starting_indexes.height))
else {
return Ok(());
};
let first_tx_at_min_height = indexer
.vecs
.transactions
.first_tx_index
.collect_one(min_height)
.unwrap();
self.block.truncate_if_needed(min_height)?;
self.tx.truncate_if_needed(first_tx_at_min_height)?;
parser
.read(
Some(min_height),
Some((indexer.vecs.transactions.first_tx_index.len() - 1).into()),
)
.iter()
.try_for_each(|block| -> Result<()> {
self.block.push(block.metadata().position());
block.tx_metadata().iter().for_each(|metadata| {
self.tx.push(metadata.position());
});
if *block.height() % 1_000 == 0 {
let _lock = exit.lock();
self.block.write()?;
self.tx.write()?;
}
Ok(())
})?;
let _lock = exit.lock();
self.block.write()?;
self.tx.write()?;
Ok(())
}
}

View File

@@ -1,6 +1,6 @@
use brk_error::Result;
use brk_indexer::Indexer;
use brk_types::{FeeRate, Indexes, Sats};
use brk_types::{FeeRate, Indexes, OutPoint, Sats, TxInIndex, VSize};
use vecdb::{AnyStoredVec, AnyVec, Exit, ReadableVec, VecIndex, WritableVec, unlikely};
use super::super::size;
@@ -33,26 +33,47 @@ impl Vecs {
exit,
)?;
self.compute_fee_and_fee_rate(size_vecs, starting_indexes, exit)?;
self.compute_fees(indexer, indexes, size_vecs, starting_indexes, exit)?;
let (r3, r4) = rayon::join(
let (r1, (r2, r3)) = rayon::join(
|| {
self.fee
.derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1)
},
|| {
self.fee_rate
.derive_from_with_skip(indexer, indexes, starting_indexes, exit, 1)
rayon::join(
|| {
self.fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
|| {
self.effective_fee_rate.derive_from_with_skip(
indexer,
indexes,
starting_indexes,
exit,
1,
)
},
)
},
);
r1?;
r2?;
r3?;
r4?;
Ok(())
}
fn compute_fee_and_fee_rate(
fn compute_fees(
&mut self,
indexer: &Indexer,
indexes: &indexes::Vecs,
size_vecs: &size::Vecs,
starting_indexes: &Indexes,
exit: &Exit,
@@ -67,6 +88,9 @@ impl Vecs {
self.fee_rate
.tx_index
.validate_computed_version_or_reset(dep_version)?;
self.effective_fee_rate
.tx_index
.validate_computed_version_or_reset(dep_version)?;
let target = self
.input_value
@@ -78,6 +102,7 @@ impl Vecs {
.tx_index
.len()
.min(self.fee_rate.tx_index.len())
.min(self.effective_fee_rate.tx_index.len())
.min(starting_indexes.tx_index.to_usize());
if min >= target {
@@ -90,39 +115,171 @@ impl Vecs {
self.fee_rate
.tx_index
.truncate_if_needed(starting_indexes.tx_index)?;
self.effective_fee_rate
.tx_index
.truncate_if_needed(starting_indexes.tx_index)?;
loop {
let skip = self.fee.tx_index.len();
let end = self.fee.tx_index.batch_end(target);
if skip >= end {
let start_tx = self.fee.tx_index.len();
let max_height = indexer.vecs.transactions.first_tx_index.len();
let start_height = if start_tx == 0 {
0
} else {
indexer
.vecs
.transactions
.height
.collect_one_at(start_tx)
.unwrap()
.to_usize()
};
for h in start_height..max_height {
let first_tx: usize = indexer
.vecs
.transactions
.first_tx_index
.collect_one_at(h)
.unwrap()
.to_usize();
let n = *indexes.height.tx_index_count.collect_one_at(h).unwrap() as usize;
if first_tx + n > target {
break;
}
let input_batch = self.input_value.collect_range_at(skip, end);
let output_batch = self.output_value.collect_range_at(skip, end);
let vsize_batch = size_vecs.vsize.tx_index.collect_range_at(skip, end);
// Batch read all per-tx data for this block
let input_values = self.input_value.collect_range_at(first_tx, first_tx + n);
let output_values = self.output_value.collect_range_at(first_tx, first_tx + n);
let vsizes: Vec<VSize> = size_vecs
.vsize
.tx_index
.collect_range_at(first_tx, first_tx + n);
let txin_starts: Vec<TxInIndex> = indexer
.vecs
.transactions
.first_txin_index
.collect_range_at(first_tx, first_tx + n);
let input_begin = txin_starts[0].to_usize();
let input_end = if h + 1 < max_height {
indexer
.vecs
.inputs
.first_txin_index
.collect_one_at(h + 1)
.unwrap()
.to_usize()
} else {
indexer.vecs.inputs.outpoint.len()
};
let outpoints: Vec<OutPoint> = indexer
.vecs
.inputs
.outpoint
.collect_range_at(input_begin, input_end);
for j in 0..input_batch.len() {
let fee = if unlikely(input_batch[j].is_max()) {
// Compute fee + fee_rate per tx
let mut fees = Vec::with_capacity(n);
for j in 0..n {
let fee = if unlikely(input_values[j].is_max()) {
Sats::ZERO
} else {
input_batch[j] - output_batch[j]
input_values[j] - output_values[j]
};
self.fee.tx_index.push(fee);
self.fee_rate
.tx_index
.push(FeeRate::from((fee, vsize_batch[j])));
self.fee_rate.tx_index.push(FeeRate::from((fee, vsizes[j])));
fees.push(fee);
}
let _lock = exit.lock();
let (r1, r2) = rayon::join(
|| self.fee.tx_index.write(),
|| self.fee_rate.tx_index.write(),
// Effective fee rate via same-block CPFP clustering
let effective = cluster_fee_rates(
&txin_starts,
&outpoints,
input_begin,
first_tx,
&fees,
&vsizes,
);
r1?;
r2?;
for rate in effective {
self.effective_fee_rate.tx_index.push(rate);
}
if h % 1_000 == 0 {
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.effective_fee_rate.tx_index.write()?;
}
}
let _lock = exit.lock();
self.fee.tx_index.write()?;
self.fee_rate.tx_index.write()?;
self.effective_fee_rate.tx_index.write()?;
Ok(())
}
}
/// Groups a block's transactions into CPFP clusters (a child spending a
/// same-block parent joins its parent's cluster) and assigns every tx the
/// fee rate of its whole cluster: `cluster_fee / cluster_vsize`.
///
/// `txin_starts[j]` is the first input index of tx `j`; `outpoints` holds the
/// block's input outpoints with `outpoint_base` the absolute index of
/// `outpoints[0]`; `first_tx` is the absolute index of tx `0`. Tx `0` (the
/// coinbase slot) is never linked to a parent.
fn cluster_fee_rates(
    txin_starts: &[TxInIndex],
    outpoints: &[OutPoint],
    outpoint_base: usize,
    first_tx: usize,
    fees: &[Sats],
    vsizes: &[VSize],
) -> Vec<FeeRate> {
    let tx_count = fees.len();

    // Union-find forest over the block's txs; each tx starts as its own root.
    let mut forest: Vec<usize> = (0..tx_count).collect();

    // Link every non-coinbase tx to any parent it spends within this block.
    for child in 1..tx_count {
        let lo = txin_starts[child].to_usize() - outpoint_base;
        let hi = match txin_starts.get(child + 1) {
            Some(next) => next.to_usize() - outpoint_base,
            None => outpoints.len(),
        };
        for outpoint in &outpoints[lo..hi] {
            if outpoint.is_coinbase() {
                continue;
            }
            let spent_tx = outpoint.tx_index().to_usize();
            if spent_tx >= first_tx && spent_tx < first_tx + tx_count {
                union(&mut forest, child, spent_tx - first_tx);
            }
        }
    }

    // Accumulate total fee and vsize on each cluster's root.
    let mut fee_of_root = vec![Sats::ZERO; tx_count];
    let mut vsize_of_root = vec![VSize::from(0u64); tx_count];
    for (tx, (&fee, &vsize)) in fees.iter().zip(vsizes).enumerate() {
        let root = find(&mut forest, tx);
        fee_of_root[root] += fee;
        vsize_of_root[root] += vsize;
    }

    // Every member of a cluster reports the cluster-wide rate.
    (0..tx_count)
        .map(|tx| {
            let root = find(&mut forest, tx);
            FeeRate::from((fee_of_root[root], vsize_of_root[root]))
        })
        .collect()
}
/// Union-find root lookup with path halving: while walking up, each visited
/// node is re-pointed at its grandparent, flattening the tree for later calls.
fn find(parent: &mut [usize], mut i: usize) -> usize {
    loop {
        let p = parent[i];
        if p == i {
            // `i` points at itself: it is the root.
            return i;
        }
        // Path halving: skip one level by linking `i` to its grandparent.
        let gp = parent[p];
        parent[i] = gp;
        i = gp;
    }
}
/// Merges the union-find sets containing `a` and `b` by re-pointing `a`'s
/// root at `b`'s root; a no-op when they already share a root.
fn union(parent: &mut [usize], a: usize, b: usize) {
    let root_a = find(parent, a);
    let root_b = find(parent, b);
    if root_a == root_b {
        return;
    }
    // `b`'s root wins: `a`'s tree is attached beneath it.
    parent[root_a] = root_b;
}

View File

@@ -20,6 +20,12 @@ impl Vecs {
output_value: EagerVec::forced_import(db, "output_value", version)?,
fee: PerTxDistribution::forced_import(db, "fee", v, indexes)?,
fee_rate: PerTxDistribution::forced_import(db, "fee_rate", v, indexes)?,
effective_fee_rate: PerTxDistribution::forced_import(
db,
"effective_fee_rate",
v,
indexes,
)?,
})
}
}

View File

@@ -10,4 +10,5 @@ pub struct Vecs<M: StorageMode = Rw> {
pub output_value: M::Stored<EagerVec<PcoVec<TxIndex, Sats>>>,
pub fee: PerTxDistribution<Sats, M>,
pub fee_rate: PerTxDistribution<FeeRate, M>,
pub effective_fee_rate: PerTxDistribution<FeeRate, M>,
}

View File

@@ -12,7 +12,6 @@ exclude = ["examples/"]
bitcoin = { workspace = true }
brk_error = { workspace = true, features = ["fjall", "vecdb"] }
brk_cohort = { workspace = true }
brk_iterator = { workspace = true }
brk_logger = { workspace = true }
brk_reader = { workspace = true }
brk_rpc = { workspace = true, features = ["corepc"] }

View File

@@ -7,7 +7,6 @@ use std::{
use brk_alloc::Mimalloc;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use tracing::{debug, info};
@@ -33,9 +32,6 @@ fn main() -> color_eyre::Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
debug!("Reader created.");
let blocks = Blocks::new(&client, &reader);
debug!("Blocks created.");
let mut indexer = Indexer::forced_import(&outputs_dir)?;
debug!("Indexer imported.");
@@ -44,7 +40,7 @@ fn main() -> color_eyre::Result<()> {
loop {
let i = Instant::now();
indexer.checked_index(&blocks, &client, &exit)?;
indexer.checked_index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed());
Mimalloc::collect();

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use tracing::{debug, info};
@@ -33,8 +32,6 @@ fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut bencher =
@@ -50,7 +47,7 @@ fn main() -> Result<()> {
});
let i = Instant::now();
indexer.index(&blocks, &client, &exit)?;
indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed());
// We want to benchmark the drop too

View File

@@ -9,7 +9,6 @@ use brk_alloc::Mimalloc;
use brk_bencher::Bencher;
use brk_error::Result;
use brk_indexer::Indexer;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::{Auth, Client};
use tracing::{debug, info};
@@ -33,8 +32,6 @@ fn main() -> Result<()> {
let reader = Reader::new(bitcoin_dir.join("blocks"), &client);
let blocks = Blocks::new(&client, &reader);
let mut indexer = Indexer::forced_import(&outputs_dir)?;
let mut bencher =
@@ -51,7 +48,7 @@ fn main() -> Result<()> {
loop {
let i = Instant::now();
indexer.index(&blocks, &client, &exit)?;
indexer.index(&reader, &client, &exit)?;
info!("Done in {:?}", i.elapsed());
Mimalloc::collect();

View File

@@ -4,7 +4,7 @@ use brk_types::{TxIndex, Txid, TxidPrefix, Version};
// One version for all data sources
// Increment on **change _OR_ addition**
pub const VERSION: Version = Version::new(25);
pub const VERSION: Version = Version::new(26);
pub const SNAPSHOT_BLOCK_RANGE: usize = 1_000;
/// Known duplicate Bitcoin transactions (BIP30)

View File

@@ -8,12 +8,14 @@ use std::{
};
use brk_error::Result;
use brk_iterator::Blocks;
use brk_reader::Reader;
use brk_rpc::Client;
use brk_types::Height;
use fjall::PersistMode;
use tracing::{debug, info};
use vecdb::{Exit, RawDBError, ReadOnlyClone, ReadableVec, Ro, Rw, StorageMode};
use vecdb::{
Exit, RawDBError, ReadOnlyClone, ReadableVec, Ro, Rw, StorageMode, WritableVec, unlikely,
};
mod constants;
mod indexes;
mod processor;
@@ -93,22 +95,22 @@ impl Indexer {
}
}
pub fn index(&mut self, blocks: &Blocks, client: &Client, exit: &Exit) -> Result<Indexes> {
self.index_(blocks, client, exit, false)
pub fn index(&mut self, reader: &Reader, client: &Client, exit: &Exit) -> Result<Indexes> {
self.index_(reader, client, exit, false)
}
pub fn checked_index(
&mut self,
blocks: &Blocks,
reader: &Reader,
client: &Client,
exit: &Exit,
) -> Result<Indexes> {
self.index_(blocks, client, exit, true)
self.index_(reader, client, exit, true)
}
fn index_(
&mut self,
blocks: &Blocks,
reader: &Reader,
client: &Client,
exit: &Exit,
check_collisions: bool,
@@ -172,13 +174,13 @@ impl Indexer {
let stores_res = s.spawn(|| -> Result<()> {
let i = Instant::now();
stores.commit(height)?;
info!("Stores exported in {:?}", i.elapsed());
debug!("Stores exported in {:?}", i.elapsed());
Ok(())
});
let vecs_res = s.spawn(|| -> Result<()> {
let i = Instant::now();
vecs.flush(height)?;
info!("Vecs exported in {:?}", i.elapsed());
debug!("Vecs exported in {:?}", i.elapsed());
Ok(())
});
stores_res.join().unwrap()?;
@@ -195,13 +197,22 @@ impl Indexer {
let vecs = &mut self.vecs;
let stores = &mut self.stores;
for block in blocks.after(prev_hash)? {
for block in reader.after(prev_hash)?.iter() {
let height = block.height();
info!("Indexing block {height}...");
if unlikely(height.is_multiple_of(100)) {
info!("Indexing block {height}...");
} else {
debug!("Indexing block {height}...");
}
indexes.height = height;
vecs.blocks.position.push(block.metadata().position());
block.tx_metadata().iter().for_each(|m| {
vecs.transactions.position.push(m.position());
});
let mut processor = BlockProcessor {
block: &block,
height,
@@ -271,13 +282,13 @@ impl Indexer {
for task in tasks {
task().map_err(vecdb::RawDBError::other)?;
}
info!("Stores committed in {:?}", i.elapsed());
debug!("Stores committed in {:?}", i.elapsed());
let i = Instant::now();
fjall_db
.persist(PersistMode::SyncData)
.map_err(RawDBError::other)?;
info!("Stores persisted in {:?}", i.elapsed());
debug!("Stores persisted in {:?}", i.elapsed());
}
db.compact()?;

View File

@@ -28,14 +28,14 @@ impl BlockProcessor<'_> {
.blockhash_prefix_to_height
.insert(blockhash_prefix, height);
self.stores
.height_to_coinbase_tag
.insert(height, self.block.coinbase_tag().into());
self.vecs
.blocks
.blockhash
.checked_push(height, blockhash.clone())?;
self.vecs
.blocks
.coinbase_tag
.checked_push(height, self.block.coinbase_tag())?;
self.vecs
.blocks
.difficulty
@@ -53,21 +53,28 @@ impl BlockProcessor<'_> {
pub fn push_block_size_and_weight(&mut self, txs: &[ComputedTx]) -> Result<()> {
let overhead = bitcoin::block::Header::SIZE + bitcoin::VarInt::from(txs.len()).size();
let mut total_size = overhead;
let mut weight_wu = overhead * 4;
for ct in txs {
let base = ct.base_size as usize;
let total = ct.total_size as usize;
total_size += total;
weight_wu += base * 3 + total;
let mut weight = overhead * 4;
let mut sw_txs = 0u32;
let mut sw_size = 0usize;
let mut sw_weight = 0usize;
for tx in txs {
total_size += tx.total_size as usize;
weight += tx.weight();
if tx.is_segwit() {
sw_txs += 1;
sw_size += tx.total_size as usize;
sw_weight += tx.weight();
}
}
self.vecs
.blocks
.total
.checked_push(self.height, total_size.into())?;
self.vecs
.blocks
.weight
.checked_push(self.height, weight_wu.into())?;
let h = self.height;
let blocks = &mut self.vecs.blocks;
blocks.total.checked_push(h, total_size.into())?;
blocks.weight.checked_push(h, weight.into())?;
blocks.segwit_txs.checked_push(h, sw_txs.into())?;
blocks.segwit_size.checked_push(h, sw_size.into())?;
blocks.segwit_weight.checked_push(h, sw_weight.into())?;
Ok(())
}
}

View File

@@ -48,6 +48,18 @@ pub struct ComputedTx<'a> {
pub total_size: u32,
}
impl ComputedTx<'_> {
    /// True when the serialized size with witness data (`total_size`) differs
    /// from the witness-stripped size (`base_size`) — i.e. the tx is segwit.
    #[inline]
    pub fn is_segwit(&self) -> bool {
        self.base_size != self.total_size
    }

    /// Weight in weight units: `base_size * 3 + total_size`.
    /// NOTE(review): equals the BIP141 weight (`base * 4 + witness`) assuming
    /// `total_size == base_size + witness bytes` — confirm field semantics.
    #[inline]
    pub fn weight(&self) -> usize {
        self.base_size as usize * 3 + self.total_size as usize
    }
}
/// Reusable buffers cleared and refilled each block to avoid allocation churn.
#[derive(Default)]
pub struct BlockBuffers {

View File

@@ -7,11 +7,11 @@ use brk_error::Result;
use brk_store::{AnyStore, Kind, Mode, Store};
use brk_types::{
AddrHash, AddrIndexOutPoint, AddrIndexTxIndex, BlockHashPrefix, Height, OutPoint, OutputType,
StoredString, TxIndex, TxOutIndex, TxidPrefix, TypeIndex, Unit, Version, Vout,
TxIndex, TxOutIndex, TxidPrefix, TypeIndex, Unit, Version, Vout,
};
use fjall::{Database, PersistMode};
use rayon::prelude::*;
use tracing::info;
use tracing::{debug, info};
use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::{Indexes, constants::DUPLICATE_TXID_PREFIXES};
@@ -26,7 +26,6 @@ pub struct Stores {
pub addr_type_to_addr_index_and_tx_index: ByAddrType<Store<AddrIndexTxIndex, Unit>>,
pub addr_type_to_addr_index_and_unspent_outpoint: ByAddrType<Store<AddrIndexOutPoint, Unit>>,
pub blockhash_prefix_to_height: Store<BlockHashPrefix, Height>,
pub height_to_coinbase_tag: Store<Height, StoredString>,
pub txid_prefix_to_tx_index: Store<TxidPrefix, TxIndex>,
}
@@ -88,14 +87,6 @@ impl Stores {
Ok(Self {
db: database.clone(),
height_to_coinbase_tag: Store::import(
database_ref,
path,
"height_to_coinbase_tag",
version,
Mode::PushOnly,
Kind::Sequential,
)?,
addr_type_to_addr_hash_to_addr_index: ByAddrType::new_with_index(
create_addr_hash_to_addr_index_store,
)?,
@@ -135,7 +126,6 @@ impl Stores {
fn iter_any(&self) -> impl Iterator<Item = &dyn AnyStore> {
[
&self.blockhash_prefix_to_height as &dyn AnyStore,
&self.height_to_coinbase_tag,
&self.txid_prefix_to_tx_index,
]
.into_iter()
@@ -159,7 +149,6 @@ impl Stores {
fn par_iter_any_mut(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStore> {
[
&mut self.blockhash_prefix_to_height as &mut dyn AnyStore,
&mut self.height_to_coinbase_tag,
&mut self.txid_prefix_to_tx_index,
]
.into_par_iter()
@@ -184,11 +173,11 @@ impl Stores {
let i = Instant::now();
self.par_iter_any_mut()
.try_for_each(|store| store.commit(height))?;
info!("Stores committed in {:?}", i.elapsed());
debug!("Stores committed in {:?}", i.elapsed());
let i = Instant::now();
self.db.persist(PersistMode::SyncData)?;
info!("Stores persisted in {:?}", i.elapsed());
debug!("Stores persisted in {:?}", i.elapsed());
Ok(())
}
@@ -210,7 +199,6 @@ impl Stores {
}
take!(self.blockhash_prefix_to_height);
take!(self.height_to_coinbase_tag);
take!(self.txid_prefix_to_tx_index);
for store in self.addr_type_to_addr_hash_to_addr_index.values_mut() {
@@ -257,7 +245,6 @@ impl Stores {
fn is_empty(&self) -> Result<bool> {
Ok(self.blockhash_prefix_to_height.is_empty()?
&& self.txid_prefix_to_tx_index.is_empty()?
&& self.height_to_coinbase_tag.is_empty()?
&& self
.addr_type_to_addr_hash_to_addr_index
.values()
@@ -286,12 +273,6 @@ impl Stores {
},
);
(starting_indexes.height.to_usize()..vecs.blocks.blockhash.len())
.map(Height::from)
.for_each(|h| {
self.height_to_coinbase_tag.remove(h);
});
for addr_type in OutputType::ADDR_TYPES {
for hash in vecs.iter_addr_hashes_from(addr_type, starting_indexes.height)? {
self.addr_type_to_addr_hash_to_addr_index

View File

@@ -1,6 +1,9 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{BlockHash, Height, StoredF64, StoredU64, Timestamp, Version, Weight};
use brk_types::{
BlkPosition, BlockHash, CoinbaseTag, Height, StoredF64, StoredU32, StoredU64, Timestamp,
Version, Weight,
};
use rayon::prelude::*;
use vecdb::{
AnyStoredVec, BytesVec, Database, ImportableVec, PcoVec, Rw, Stamp, StorageMode, WritableVec,
@@ -11,6 +14,7 @@ use crate::parallel_import;
#[derive(Traversable)]
pub struct BlocksVecs<M: StorageMode = Rw> {
pub blockhash: M::Stored<BytesVec<Height, BlockHash>>,
pub coinbase_tag: M::Stored<BytesVec<Height, CoinbaseTag>>,
#[traversable(wrap = "difficulty", rename = "value")]
pub difficulty: M::Stored<PcoVec<Height, StoredF64>>,
/// Doesn't guarantee continuity due to possible reorgs and more generally the nature of mining
@@ -20,45 +24,85 @@ pub struct BlocksVecs<M: StorageMode = Rw> {
pub total: M::Stored<PcoVec<Height, StoredU64>>,
#[traversable(wrap = "weight", rename = "base")]
pub weight: M::Stored<PcoVec<Height, Weight>>,
#[traversable(hidden)]
pub position: M::Stored<PcoVec<Height, BlkPosition>>,
pub segwit_txs: M::Stored<PcoVec<Height, StoredU32>>,
pub segwit_size: M::Stored<PcoVec<Height, StoredU64>>,
pub segwit_weight: M::Stored<PcoVec<Height, Weight>>,
}
impl BlocksVecs {
pub fn forced_import(db: &Database, version: Version) -> Result<Self> {
let (blockhash, difficulty, timestamp, total, weight) = parallel_import! {
blockhash = BytesVec::forced_import(db, "blockhash", version),
difficulty = PcoVec::forced_import(db, "difficulty", version),
timestamp = PcoVec::forced_import(db, "timestamp", version),
total_size = PcoVec::forced_import(db, "total_size", version),
weight = PcoVec::forced_import(db, "block_weight", version),
};
Ok(Self {
let (
blockhash,
coinbase_tag,
difficulty,
timestamp,
total,
weight,
position,
segwit_txs,
segwit_size,
segwit_weight,
) = parallel_import! {
blockhash = BytesVec::forced_import(db, "blockhash", version),
coinbase_tag = BytesVec::forced_import(db, "coinbase_tag", version),
difficulty = PcoVec::forced_import(db, "difficulty", version),
timestamp = PcoVec::forced_import(db, "timestamp", version),
total_size = PcoVec::forced_import(db, "total_size", version),
weight = PcoVec::forced_import(db, "block_weight", version),
position = PcoVec::forced_import(db, "block_position", version),
segwit_txs = PcoVec::forced_import(db, "segwit_txs", version),
segwit_size = PcoVec::forced_import(db, "segwit_size", version),
segwit_weight = PcoVec::forced_import(db, "segwit_weight", version),
};
Ok(Self {
blockhash,
coinbase_tag,
difficulty,
timestamp,
total,
weight,
position,
segwit_txs,
segwit_size,
segwit_weight,
})
}
pub fn truncate(&mut self, height: Height, stamp: Stamp) -> Result<()> {
self.blockhash
.truncate_if_needed_with_stamp(height, stamp)?;
self.coinbase_tag
.truncate_if_needed_with_stamp(height, stamp)?;
self.difficulty
.truncate_if_needed_with_stamp(height, stamp)?;
self.timestamp
.truncate_if_needed_with_stamp(height, stamp)?;
self.total.truncate_if_needed_with_stamp(height, stamp)?;
self.weight.truncate_if_needed_with_stamp(height, stamp)?;
self.position.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_txs
.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_size
.truncate_if_needed_with_stamp(height, stamp)?;
self.segwit_weight
.truncate_if_needed_with_stamp(height, stamp)?;
Ok(())
}
pub fn par_iter_mut_any(&mut self) -> impl ParallelIterator<Item = &mut dyn AnyStoredVec> {
[
&mut self.blockhash as &mut dyn AnyStoredVec,
&mut self.coinbase_tag,
&mut self.difficulty,
&mut self.timestamp,
&mut self.total,
&mut self.weight,
&mut self.position,
&mut self.segwit_txs,
&mut self.segwit_size,
&mut self.segwit_weight,
]
.into_par_iter()
}

View File

@@ -1,8 +1,8 @@
use brk_error::Result;
use brk_traversable::Traversable;
use brk_types::{
Height, RawLockTime, StoredBool, StoredU32, TxInIndex, TxIndex, TxOutIndex, TxVersion, Txid,
Version,
BlkPosition, Height, RawLockTime, StoredBool, StoredU32, TxInIndex, TxIndex, TxOutIndex,
TxVersion, Txid, Version,
};
use rayon::prelude::*;
use vecdb::{
@@ -23,6 +23,8 @@ pub struct TransactionsVecs<M: StorageMode = Rw> {
pub is_explicitly_rbf: M::Stored<PcoVec<TxIndex, StoredBool>>,
pub first_txin_index: M::Stored<PcoVec<TxIndex, TxInIndex>>,
pub first_txout_index: M::Stored<BytesVec<TxIndex, TxOutIndex>>,
#[traversable(hidden)]
pub position: M::Stored<PcoVec<TxIndex, BlkPosition>>,
}
pub struct TxMetadataVecs<'a> {
@@ -70,6 +72,7 @@ impl TransactionsVecs {
is_explicitly_rbf,
first_txin_index,
first_txout_index,
position,
) = parallel_import! {
first_tx_index = PcoVec::forced_import(db, "first_tx_index", version),
height = PcoVec::forced_import(db, "height", version),
@@ -81,6 +84,7 @@ impl TransactionsVecs {
is_explicitly_rbf = PcoVec::forced_import(db, "is_explicitly_rbf", version),
first_txin_index = PcoVec::forced_import(db, "first_txin_index", version),
first_txout_index = BytesVec::forced_import(db, "first_txout_index", version),
position = PcoVec::forced_import(db, "tx_position", version),
};
Ok(Self {
first_tx_index,
@@ -93,6 +97,7 @@ impl TransactionsVecs {
is_explicitly_rbf,
first_txin_index,
first_txout_index,
position,
})
}
@@ -115,6 +120,8 @@ impl TransactionsVecs {
.truncate_if_needed_with_stamp(tx_index, stamp)?;
self.first_txout_index
.truncate_if_needed_with_stamp(tx_index, stamp)?;
self.position
.truncate_if_needed_with_stamp(tx_index, stamp)?;
Ok(())
}
@@ -130,6 +137,7 @@ impl TransactionsVecs {
&mut self.is_explicitly_rbf,
&mut self.first_txin_index,
&mut self.first_txout_index,
&mut self.position,
]
.into_par_iter()
}

View File

@@ -1,4 +1,4 @@
use brk_types::{FeeRate, MempoolEntryInfo, Sats, Txid, TxidPrefix, VSize};
use brk_types::{FeeRate, MempoolEntryInfo, Sats, Timestamp, Txid, TxidPrefix, VSize};
use smallvec::SmallVec;
/// A mempool transaction entry.
@@ -16,6 +16,8 @@ pub struct Entry {
pub ancestor_vsize: VSize,
/// Parent txid prefixes (most txs have 0-2 parents)
pub depends: SmallVec<[TxidPrefix; 2]>,
/// When this tx was first seen in the mempool
pub first_seen: Timestamp,
}
impl Entry {
@@ -27,6 +29,7 @@ impl Entry {
ancestor_fee: info.ancestor_fee,
ancestor_vsize: VSize::from(info.ancestor_size),
depends: info.depends.iter().map(TxidPrefix::from).collect(),
first_seen: Timestamp::now(),
}
}

View File

@@ -106,6 +106,10 @@ impl MempoolInner {
self.txs.read()
}
/// Acquires and returns a read guard over the mempool entry pool; the lock is
/// held until the guard is dropped.
pub fn get_entries(&self) -> RwLockReadGuard<'_, EntryPool> {
    self.entries.read()
}
pub fn get_addrs(&self) -> RwLockReadGuard<'_, AddrTracker> {
self.addrs.read()
}

View File

@@ -1,20 +1,39 @@
use brk_types::{TxWithHex, Txid};
use brk_types::{MempoolRecentTx, TxWithHex, Txid};
use derive_more::Deref;
use rustc_hash::FxHashMap;
const RECENT_CAP: usize = 10;
/// Store of full transaction data for API access.
#[derive(Default, Deref)]
pub struct TxStore(FxHashMap<Txid, TxWithHex>);
pub struct TxStore {
#[deref]
txs: FxHashMap<Txid, TxWithHex>,
recent: Vec<MempoolRecentTx>,
}
impl TxStore {
/// Check if a transaction exists.
pub fn contains(&self, txid: &Txid) -> bool {
self.0.contains_key(txid)
self.txs.contains_key(txid)
}
/// Add transactions in bulk.
pub fn extend(&mut self, txs: FxHashMap<Txid, TxWithHex>) {
self.0.extend(txs);
let mut new: Vec<_> = txs
.iter()
.take(RECENT_CAP)
.map(|(txid, tx_hex)| MempoolRecentTx::from((txid, tx_hex.tx())))
.collect();
let keep = RECENT_CAP.saturating_sub(new.len());
new.extend(self.recent.drain(..keep.min(self.recent.len())));
self.recent = new;
self.txs.extend(txs);
}
/// Last 10 transactions to enter the mempool.
pub fn recent(&self) -> &[MempoolRecentTx] {
&self.recent
}
/// Keep items matching predicate, call `on_remove` for each removed item.
@@ -23,7 +42,7 @@ impl TxStore {
K: FnMut(&Txid) -> bool,
R: FnMut(&Txid, &TxWithHex),
{
self.0.retain(|txid, tx| {
self.txs.retain(|txid, tx| {
if keep(txid) {
true
} else {

View File

@@ -1,12 +1,17 @@
use bitcoin::consensus::Decodable;
use bitcoin::hex::DisplayHex;
use brk_error::{Error, Result};
use brk_types::{
BlockExtras, BlockHash, BlockHashPrefix, BlockInfo, BlockPool, Height, TxIndex, pools,
BlockExtras, BlockHash, BlockHashPrefix, BlockHeader, BlockInfo, BlockInfoV1, BlockPool,
FeeRate, Height, Sats, Timestamp, TxIndex, VSize, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
use crate::Query;
const DEFAULT_BLOCK_COUNT: u32 = 10;
const DEFAULT_V1_BLOCK_COUNT: u32 = 15;
const HEADER_SIZE: usize = 80;
impl Query {
pub fn block(&self, hash: &BlockHash) -> Result<BlockInfo> {
@@ -15,58 +20,70 @@ impl Query {
}
pub fn block_by_height(&self, height: Height) -> Result<BlockInfo> {
let indexer = self.indexer();
let max_height = self.max_height();
if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
self.blocks_range(height.to_usize(), height.to_usize() + 1)?
.pop()
.ok_or(Error::NotFound("Block not found".into()))
}
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?;
let difficulty = indexer.vecs.blocks.difficulty.collect_one(height).unwrap();
let timestamp = indexer.vecs.blocks.timestamp.collect_one(height).unwrap();
let size = indexer.vecs.blocks.total.collect_one(height).unwrap();
let weight = indexer.vecs.blocks.weight.collect_one(height).unwrap();
let tx_count = self.tx_count_at_height(height, max_height)?;
pub fn block_by_height_v1(&self, height: Height) -> Result<BlockInfoV1> {
let max_height = self.max_height();
if height > max_height {
return Err(Error::OutOfRange("Block height out of range".into()));
}
self.blocks_v1_range(height.to_usize(), height.to_usize() + 1)?
.pop()
.ok_or(Error::NotFound("Block not found".into()))
}
Ok(BlockInfo {
id: blockhash,
height,
tx_count,
size: *size,
weight,
timestamp,
difficulty: *difficulty,
})
pub fn block_header_hex(&self, hash: &BlockHash) -> Result<String> {
let height = self.height_by_hash(hash)?;
let header = self.read_block_header(height)?;
Ok(bitcoin::consensus::encode::serialize_hex(&header))
}
/// Hash of the block at `height`.
///
/// Returns `Error::OutOfRange` when `height` is above the current max
/// height; otherwise reads the hash straight from the indexer's
/// `blockhash` vec.
pub fn block_hash_by_height(&self, height: Height) -> Result<BlockHash> {
    let max_height = self.max_height();
    if height > max_height {
        return Err(Error::OutOfRange("Block height out of range".into()));
    }
    // Single point read; `?` converts the vec read error.
    Ok(self.indexer().vecs.blocks.blockhash.read_once(height)?)
}
pub fn blocks(&self, start_height: Option<Height>) -> Result<Vec<BlockInfo>> {
let max_height = self.indexed_height();
let (begin, end) = self.resolve_block_range(start_height, DEFAULT_BLOCK_COUNT);
self.blocks_range(begin, end)
}
let start = start_height.unwrap_or(max_height);
let start = start.min(max_height);
pub fn blocks_v1(&self, start_height: Option<Height>) -> Result<Vec<BlockInfoV1>> {
let (begin, end) = self.resolve_block_range(start_height, DEFAULT_V1_BLOCK_COUNT);
self.blocks_v1_range(begin, end)
}
let start_u32: u32 = start.into();
let count = DEFAULT_BLOCK_COUNT.min(start_u32 + 1) as usize;
// === Range queries (bulk reads) ===
if count == 0 {
fn blocks_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfo>> {
if begin >= end {
return Ok(Vec::new());
}
let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader();
// Batch-read all PcoVec data for the contiguous range (avoids
// per-block page decompression — 4 reads instead of 4*count).
let end = start_u32 as usize + 1;
let begin = end - count;
// Bulk read all indexed data
let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
// Batch-read first_tx_index for tx_count computation (need one extra for next boundary)
// Bulk read tx indexes for tx_count
let max_height = self.indexed_height();
let tx_index_end = if end <= max_height.to_usize() {
end + 1
} else {
@@ -79,24 +96,39 @@ impl Query {
.collect_range_at(begin, tx_index_end);
let total_txs = computer.indexes.tx_index.identity.len();
// Bulk read median time window
let median_start = begin.saturating_sub(10);
let median_timestamps: Vec<Timestamp> = indexer
.vecs
.blocks
.timestamp
.collect_range_at(median_start, end);
let count = end - begin;
let mut blocks = Vec::with_capacity(count);
for i in (0..count).rev() {
let height = Height::from(begin + i);
let blockhash = indexer.vecs.blocks.blockhash.read_once(height)?;
let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
let header = Self::decode_header(&raw_header)?;
let tx_count = if i + 1 < first_tx_indexes.len() {
first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()
(first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
} else {
total_txs - first_tx_indexes[i].to_usize()
(total_txs - first_tx_indexes[i].to_usize()) as u32
};
let median_time =
Self::compute_median_time(&median_timestamps, begin + i, median_start);
blocks.push(BlockInfo {
id: blockhash,
height,
tx_count: tx_count as u32,
id: blockhashes[i].clone(),
height: Height::from(begin + i),
header,
timestamp: timestamps[i],
tx_count,
size: *sizes[i],
weight: weights[i],
timestamp: timestamps[i],
median_time,
difficulty: *difficulties[i],
});
}
@@ -104,13 +136,254 @@ impl Query {
Ok(blocks)
}
/// Bulk-builds V1 block summaries (`BlockInfoV1`) for the half-open height
/// range `[begin, end)`, returned newest first.
///
/// Every per-block series is batch-read exactly once for the whole range,
/// so the assembly loop below performs no further page decompression.
pub(crate) fn blocks_v1_range(&self, begin: usize, end: usize) -> Result<Vec<BlockInfoV1>> {
    if begin >= end {
        return Ok(Vec::new());
    }
    let count = end - begin;
    let indexer = self.indexer();
    let computer = self.computer();
    let reader = self.reader();
    let all_pools = pools();
    // Bulk read all indexed data
    let blockhashes = indexer.vecs.blocks.blockhash.collect_range_at(begin, end);
    let difficulties = indexer.vecs.blocks.difficulty.collect_range_at(begin, end);
    let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(begin, end);
    let sizes = indexer.vecs.blocks.total.collect_range_at(begin, end);
    let weights = indexer.vecs.blocks.weight.collect_range_at(begin, end);
    let positions = indexer.vecs.blocks.position.collect_range_at(begin, end);
    let pool_slugs = computer.pools.pool.collect_range_at(begin, end);
    // Bulk read tx indexes
    // tx_count for block h is first_tx_index[h+1] - first_tx_index[h], so one
    // extra entry is read when the block after `end - 1` exists.
    let max_height = self.indexed_height();
    let tx_index_end = if end <= max_height.to_usize() {
        end + 1
    } else {
        end
    };
    let first_tx_indexes: Vec<TxIndex> = indexer
        .vecs
        .transactions
        .first_tx_index
        .collect_range_at(begin, tx_index_end);
    let total_txs = computer.indexes.tx_index.identity.len();
    // Bulk read segwit stats
    let segwit_txs = indexer.vecs.blocks.segwit_txs.collect_range_at(begin, end);
    let segwit_sizes = indexer.vecs.blocks.segwit_size.collect_range_at(begin, end);
    let segwit_weights = indexer
        .vecs
        .blocks
        .segwit_weight
        .collect_range_at(begin, end);
    // Bulk read extras data
    let fee_sats = computer
        .mining
        .rewards
        .fees
        .block
        .sats
        .collect_range_at(begin, end);
    let subsidy_sats = computer
        .mining
        .rewards
        .subsidy
        .block
        .sats
        .collect_range_at(begin, end);
    let input_counts = computer.inputs.count.sum.collect_range_at(begin, end);
    let output_counts = computer
        .outputs
        .count
        .total
        .sum
        .collect_range_at(begin, end);
    let utxo_set_sizes = computer
        .outputs
        .count
        .unspent
        .height
        .collect_range_at(begin, end);
    let input_volumes = computer
        .transactions
        .volume
        .transfer_volume
        .block
        .sats
        .collect_range_at(begin, end);
    let output_volumes = computer
        .mining
        .rewards
        .output_volume
        .collect_range_at(begin, end);
    // Bulk read effective fee rate distribution (accounts for CPFP)
    let frd = &computer
        .transactions
        .fees
        .effective_fee_rate
        .distribution
        .block;
    let fr_min = frd.min.height.collect_range_at(begin, end);
    let fr_pct10 = frd.pct10.height.collect_range_at(begin, end);
    let fr_pct25 = frd.pct25.height.collect_range_at(begin, end);
    let fr_median = frd.median.height.collect_range_at(begin, end);
    let fr_pct75 = frd.pct75.height.collect_range_at(begin, end);
    let fr_pct90 = frd.pct90.height.collect_range_at(begin, end);
    let fr_max = frd.max.height.collect_range_at(begin, end);
    // Bulk read fee amount distribution (sats)
    let fad = &computer.transactions.fees.fee.distribution.block;
    let fa_min = fad.min.height.collect_range_at(begin, end);
    let fa_pct10 = fad.pct10.height.collect_range_at(begin, end);
    let fa_pct25 = fad.pct25.height.collect_range_at(begin, end);
    let fa_median = fad.median.height.collect_range_at(begin, end);
    let fa_pct75 = fad.pct75.height.collect_range_at(begin, end);
    let fa_pct90 = fad.pct90.height.collect_range_at(begin, end);
    let fa_max = fad.max.height.collect_range_at(begin, end);
    // Bulk read tx positions range covering all coinbase txs (first tx of each block)
    let tx_pos_begin = first_tx_indexes[0].to_usize();
    let tx_pos_end = first_tx_indexes[count - 1].to_usize() + 1;
    let all_tx_positions = indexer
        .vecs
        .transactions
        .position
        .collect_range_at(tx_pos_begin, tx_pos_end);
    // Bulk read median time window
    // Median-time-past needs the 10 blocks before `begin` as well.
    let median_start = begin.saturating_sub(10);
    let median_timestamps = indexer
        .vecs
        .blocks
        .timestamp
        .collect_range_at(median_start, end);
    let mut blocks = Vec::with_capacity(count);
    // Iterate in reverse so the response is newest-first.
    for i in (0..count).rev() {
        let raw_header = reader.read_raw_bytes(positions[i], HEADER_SIZE)?;
        let header = Self::decode_header(&raw_header)?;
        let tx_count = if i + 1 < first_tx_indexes.len() {
            (first_tx_indexes[i + 1].to_usize() - first_tx_indexes[i].to_usize()) as u32
        } else {
            // Last indexed block: bound by the total number of txs.
            (total_txs - first_tx_indexes[i].to_usize()) as u32
        };
        let weight = weights[i];
        let size = *sizes[i];
        let total_fees = fee_sats[i];
        let subsidy = subsidy_sats[i];
        // minus 1: presumably excludes the coinbase input (cf. `non_coinbase`
        // below) — TODO confirm.
        let total_inputs = (*input_counts[i]).saturating_sub(1);
        let total_outputs = *output_counts[i];
        let vsize = weight.to_vbytes_ceil();
        let total_fees_u64 = u64::from(total_fees);
        let non_coinbase = tx_count.saturating_sub(1) as u64;
        let pool_slug = pool_slugs[i];
        let pool = all_pools.get(pool_slug);
        let (
            coinbase_raw,
            coinbase_address,
            coinbase_addresses,
            coinbase_signature,
            coinbase_signature_ascii,
        ) = Self::parse_coinbase_tx(
            reader,
            // Coinbase is the block's first tx; rebase its global index into
            // the batch-read positions slice.
            all_tx_positions[first_tx_indexes[i].to_usize() - tx_pos_begin],
        );
        let median_time =
            Self::compute_median_time(&median_timestamps, begin + i, median_start);
        let info = BlockInfo {
            id: blockhashes[i].clone(),
            height: Height::from(begin + i),
            header,
            timestamp: timestamps[i],
            tx_count,
            size,
            weight,
            median_time,
            difficulty: *difficulties[i],
        };
        let total_input_amt = input_volumes[i];
        let total_output_amt = output_volumes[i];
        let extras = BlockExtras {
            total_fees,
            median_fee: fr_median[i],
            fee_range: [
                fr_min[i],
                fr_pct10[i],
                fr_pct25[i],
                fr_median[i],
                fr_pct75[i],
                fr_pct90[i],
                fr_max[i],
            ],
            reward: subsidy + total_fees,
            pool: BlockPool {
                id: pool.unique_id(),
                name: pool.name.to_string(),
                slug: pool_slug,
            },
            // Average fee over non-coinbase txs; 0 for a coinbase-only block.
            avg_fee: Sats::from(if non_coinbase > 0 {
                total_fees_u64 / non_coinbase
            } else {
                0
            }),
            avg_fee_rate: FeeRate::from((total_fees, VSize::from(vsize))),
            coinbase_raw,
            coinbase_address,
            coinbase_addresses,
            coinbase_signature,
            coinbase_signature_ascii,
            avg_tx_size: if tx_count > 0 {
                size as f64 / tx_count as f64
            } else {
                0.0
            },
            total_inputs,
            total_outputs,
            total_output_amt,
            median_fee_amt: fa_median[i],
            fee_percentiles: [
                fa_min[i],
                fa_pct10[i],
                fa_pct25[i],
                fa_median[i],
                fa_pct75[i],
                fa_pct90[i],
                fa_max[i],
            ],
            segwit_total_txs: *segwit_txs[i],
            segwit_total_size: *segwit_sizes[i],
            segwit_total_weight: segwit_weights[i],
            header: raw_header.to_lower_hex_string(),
            utxo_set_change: total_outputs as i64 - total_inputs as i64,
            utxo_set_size: *utxo_set_sizes[i],
            total_input_amt,
            virtual_size: vsize as f64,
        };
        blocks.push(BlockInfoV1 { info, extras });
    }
    Ok(blocks)
}
// === Helper methods ===
pub fn height_by_hash(&self, hash: &BlockHash) -> Result<Height> {
let indexer = self.indexer();
let prefix = BlockHashPrefix::from(hash);
indexer
.stores
.blockhash_prefix_to_height
@@ -119,31 +392,103 @@ impl Query {
.ok_or(Error::NotFound("Block not found".into()))
}
pub fn read_block_header(&self, height: Height) -> Result<bitcoin::block::Header> {
let position = self
.indexer()
.vecs
.blocks
.position
.collect_one(height)
.unwrap();
let raw = self.reader().read_raw_bytes(position, HEADER_SIZE)?;
bitcoin::block::Header::consensus_decode(&mut raw.as_slice())
.map_err(|_| Error::Internal("Failed to decode block header"))
}
/// Height of the last indexed block (0 when the index is empty).
fn max_height(&self) -> Height {
    let indexed_blocks = self.indexer().vecs.blocks.blockhash.len();
    Height::from(indexed_blocks.saturating_sub(1))
}
fn tx_count_at_height(&self, height: Height, max_height: Height) -> Result<u32> {
let indexer = self.indexer();
let computer = self.computer();
/// Translates an optional starting height plus a desired block count into a
/// half-open `[begin, end)` index range, clamped to the current chain tip.
fn resolve_block_range(&self, start_height: Option<Height>, count: u32) -> (usize, usize) {
    let tip = self.height();
    // `None` means "start at the tip"; explicit starts are capped at the tip.
    let start: u32 = start_height.unwrap_or(tip).min(tip).into();
    let end = start as usize + 1;
    // Never take more blocks than exist at or below `start`.
    let take = count.min(start + 1) as usize;
    (end - take, end)
}
let first_tx_index = indexer
.vecs
.transactions
.first_tx_index
.collect_one(height)
.unwrap();
let next_first_tx_index = if height < max_height {
indexer
.vecs
.transactions
.first_tx_index
.collect_one(height.incremented())
.unwrap()
} else {
TxIndex::from(computer.indexes.tx_index.identity.len())
/// Consensus-decodes raw header bytes into the API's `BlockHeader` type.
///
/// # Errors
/// `Error::Internal` when the bytes are not a valid serialized header.
fn decode_header(bytes: &[u8]) -> Result<BlockHeader> {
    let raw = bitcoin::block::Header::consensus_decode(&mut &bytes[..])
        .map_err(|_| Error::Internal("Failed to decode block header"))?;
    Ok(BlockHeader::from(raw))
}
/// Median-time-past for the block at absolute `height`: the median of the
/// timestamps of that block and its (up to) 10 predecessors.
///
/// `all_timestamps` begins at absolute height `window_start`, so absolute
/// heights are rebased onto the slice before indexing.
fn compute_median_time(
    all_timestamps: &[Timestamp],
    height: usize,
    window_start: usize,
) -> Timestamp {
    let lo = height.saturating_sub(10) - window_start;
    let hi = height + 1 - window_start;
    let mut window: Vec<usize> = all_timestamps[lo..hi]
        .iter()
        .map(|t| usize::from(*t))
        .collect();
    // Only the element at the middle rank matters, so a partial selection
    // suffices — it yields exactly what sorted[len / 2] would.
    let mid = window.len() / 2;
    let (_, median, _) = window.select_nth_unstable(mid);
    Timestamp::from(*median)
}
/// Best-effort parse of a block's coinbase transaction.
///
/// Returns `(scriptSig_hex, first_output_address, all_output_addresses,
/// first_output_asm, scriptSig_ascii)`. Any read or decode failure yields
/// empty values rather than an error, since coinbase details are cosmetic.
/// (A stray leftover line from an unrelated diff was removed here.)
fn parse_coinbase_tx(
    reader: &brk_reader::Reader,
    position: brk_types::BlkPosition,
) -> (String, Option<String>, Vec<String>, String, String) {
    // 1000 bytes covers virtually every coinbase tx; the consensus decoder
    // stops at the end of the transaction regardless of trailing bytes.
    let raw_bytes = match reader.read_raw_bytes(position, 1000) {
        Ok(bytes) => bytes,
        Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
    };
    let tx = match bitcoin::Transaction::consensus_decode(&mut raw_bytes.as_slice()) {
        Ok(tx) => tx,
        Err(_) => return (String::new(), None, vec![], String::new(), String::new()),
    };
    let coinbase_raw = tx
        .input
        .first()
        .map(|input| input.script_sig.as_bytes().to_lower_hex_string())
        .unwrap_or_default();
    let coinbase_signature_ascii = tx
        .input
        .first()
        .map(|input| String::from_utf8_lossy(input.script_sig.as_bytes()).to_string())
        .unwrap_or_default();
    // NOTE(review): addresses are rendered for mainnet — confirm this path
    // is never served for test networks.
    let coinbase_addresses: Vec<String> = tx
        .output
        .iter()
        .filter_map(|output| {
            bitcoin::Address::from_script(&output.script_pubkey, bitcoin::Network::Bitcoin)
                .ok()
                .map(|a| a.to_string())
        })
        .collect();
    let coinbase_address = coinbase_addresses.first().cloned();
    let coinbase_signature = tx
        .output
        .first()
        .map(|output| output.script_pubkey.to_asm_string())
        .unwrap_or_default();
    (
        coinbase_raw,
        coinbase_address,
        coinbase_addresses,
        coinbase_signature,
        coinbase_signature_ascii,
    )
}
}

View File

@@ -12,7 +12,6 @@ impl Query {
fn block_raw_by_height(&self, height: Height) -> Result<Vec<u8>> {
let indexer = self.indexer();
let computer = self.computer();
let reader = self.reader();
let max_height = Height::from(indexer.vecs.blocks.blockhash.len().saturating_sub(1));
@@ -20,7 +19,7 @@ impl Query {
return Err(Error::OutOfRange("Block height out of range".into()));
}
let position = computer.positions.block.collect_one(height).unwrap();
let position = indexer.vecs.blocks.position.collect_one(height).unwrap();
let size = indexer.vecs.blocks.total.collect_one(height).unwrap();
reader.read_raw_bytes(position, *size as usize)

View File

@@ -23,7 +23,7 @@ impl Query {
// === Helper methods ===
fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
pub(crate) fn block_txids_by_height(&self, height: Height) -> Result<Vec<Txid>> {
let indexer = self.indexer();
let max_height = self.indexed_height();

View File

@@ -1,5 +1,8 @@
use brk_error::{Error, Result};
use brk_types::{MempoolBlock, MempoolInfo, RecommendedFees, Txid};
use brk_types::{
CpfpEntry, CpfpInfo, MempoolBlock, MempoolInfo, MempoolRecentTx, RecommendedFees, Txid,
TxidParam, TxidPrefix, Weight,
};
use crate::Query;
@@ -40,4 +43,67 @@ impl Query {
Ok(blocks)
}
pub fn mempool_recent(&self) -> Result<Vec<MempoolRecentTx>> {
let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
Ok(mempool.get_txs().recent().to_vec())
}
/// CPFP (child-pays-for-parent) details for a mempool transaction: its
/// unconfirmed ancestors, its direct in-mempool descendants, and its
/// effective (package) fee rate.
///
/// # Errors
/// `Error::MempoolNotAvailable` when no mempool is attached;
/// `Error::NotFound` when the txid is not currently in the mempool.
pub fn cpfp(&self, TxidParam { txid }: TxidParam) -> Result<CpfpInfo> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    let prefix = TxidPrefix::from(&txid);
    let entry = entries
        .get(&prefix)
        .ok_or(Error::NotFound("Transaction not in mempool".into()))?;
    // Ancestors: walk up the depends chain. Track visited prefixes so a
    // diamond-shaped dependency graph doesn't report the same ancestor
    // twice or re-walk its parents.
    let mut ancestors = Vec::new();
    let mut seen: Vec<TxidPrefix> = Vec::new();
    let mut stack: Vec<TxidPrefix> = entry.depends.to_vec();
    while let Some(p) = stack.pop() {
        if seen.contains(&p) {
            continue;
        }
        if let Some(anc) = entries.get(&p) {
            ancestors.push(CpfpEntry {
                txid: anc.txid.clone(),
                // NOTE(review): `vsize` is wrapped in `Weight` here —
                // confirm the intended unit of `CpfpEntry::weight`.
                weight: Weight::from(anc.vsize),
                fee: anc.fee,
            });
            stack.extend(anc.depends.iter().cloned());
        }
        seen.push(p);
    }
    // Descendants: linear scan for entries that list this tx as a parent.
    let mut descendants = Vec::new();
    for e in entries.entries().iter().flatten() {
        if e.depends.contains(&prefix) {
            descendants.push(CpfpEntry {
                txid: e.txid.clone(),
                weight: Weight::from(e.vsize),
                fee: e.fee,
            });
        }
    }
    let effective_fee_per_vsize = entry.effective_fee_rate();
    Ok(CpfpInfo {
        ancestors,
        descendants,
        effective_fee_per_vsize,
    })
}
/// First-seen unix timestamps for `txids`, in input order.
///
/// Transactions not (or no longer) in the mempool report `0`.
pub fn transaction_times(&self, txids: &[Txid]) -> Result<Vec<u64>> {
    let mempool = self.mempool().ok_or(Error::MempoolNotAvailable)?;
    let entries = mempool.get_entries();
    let mut times = Vec::with_capacity(txids.len());
    for txid in txids {
        let time = match entries.get(&TxidPrefix::from(txid)) {
            Some(e) => usize::from(e.first_seen) as u64,
            None => 0,
        };
        times.push(time);
    }
    Ok(times)
}
}

View File

@@ -1,7 +1,7 @@
use brk_error::{Error, Result};
use brk_types::{
Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo, PoolInfo, PoolSlug,
PoolStats, PoolsSummary, TimePeriod, pools,
BlockInfoV1, Height, PoolBlockCounts, PoolBlockShares, PoolDetail, PoolDetailInfo,
PoolHashrateEntry, PoolInfo, PoolSlug, PoolStats, PoolsSummary, TimePeriod, pools,
};
use vecdb::{AnyVec, ReadableVec, VecIndex};
@@ -177,4 +177,132 @@ impl Query {
reported_hashrate: None,
})
}
/// Up to 10 most recent blocks mined by `slug`, scanning backwards from
/// `start_height` (or the chain tip when `None`).
pub fn pool_blocks(
    &self,
    slug: PoolSlug,
    start_height: Option<Height>,
) -> Result<Vec<BlockInfoV1>> {
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let start = start_height.map(|h| h.to_usize()).unwrap_or(max_height);
    // BytesVec reader gives O(1) mmap reads — efficient for backward scan
    let reader = computer.pools.pool.reader();
    // Guard the empty index: `0..=len-1` below would otherwise probe slot 0
    // of an empty vec (the old `saturating_sub(1)` silently mapped 0 -> 0).
    if reader.len() == 0 {
        return Ok(Vec::new());
    }
    let end = start.min(reader.len() - 1);
    let mut heights = Vec::with_capacity(10);
    for h in (0..=end).rev() {
        if reader.get(h) == slug {
            heights.push(h);
            if heights.len() >= 10 {
                break;
            }
        }
    }
    let mut blocks = Vec::with_capacity(heights.len());
    for h in heights {
        // Per-block failures are skipped rather than aborting the listing.
        if let Ok(mut v) = self.blocks_v1_range(h, h + 1) {
            blocks.append(&mut v);
        }
    }
    Ok(blocks)
}
/// Sampled hashrate history for a single pool, from genesis to the tip.
pub fn pool_hashrate(&self, slug: PoolSlug) -> Result<Vec<PoolHashrateEntry>> {
    let pool = pools().get(slug);
    let samples = self.compute_pool_hashrate_entries(slug, 0)?;
    let mut out = Vec::with_capacity(samples.len());
    for (timestamp, avg_hashrate, share) in samples {
        out.push(PoolHashrateEntry {
            timestamp,
            avg_hashrate,
            share,
            pool_name: pool.name.to_string(),
        });
    }
    Ok(out)
}
/// Sampled hashrate history for every known pool over `time_period`
/// (entire history when `None`), omitting zero-share samples.
pub fn pools_hashrate(
    &self,
    time_period: Option<TimePeriod>,
) -> Result<Vec<PoolHashrateEntry>> {
    let current_height = self.height().to_usize();
    let start = time_period
        .map(|tp| current_height.saturating_sub(tp.block_count()))
        .unwrap_or(0);
    let mut entries = Vec::new();
    for pool in pools().iter() {
        // Pools whose history cannot be computed are skipped, not fatal.
        let Ok(samples) = self.compute_pool_hashrate_entries(pool.slug, start) else {
            continue;
        };
        entries.extend(
            samples
                .into_iter()
                .filter(|(_, _, share)| *share > 0.0)
                .map(|(timestamp, avg_hashrate, share)| PoolHashrateEntry {
                    timestamp,
                    avg_hashrate,
                    share,
                    pool_name: pool.name.to_string(),
                }),
        );
    }
    Ok(entries)
}
/// Compute (timestamp, hashrate, share) tuples for a pool from `start_height`.
///
/// Samples roughly 200 evenly spaced heights. `share` is the pool's
/// dominance (basis points / 10_000); the hashrate is the network's
/// day-average hashrate scaled by that share.
fn compute_pool_hashrate_entries(
    &self,
    slug: PoolSlug,
    start_height: usize,
) -> Result<Vec<(brk_types::Timestamp, u128, f64)>> {
    let computer = self.computer();
    let indexer = self.indexer();
    let end = self.height().to_usize() + 1;
    let start = start_height;
    // The dominance series lives under either `major` or `minor` pools.
    let dominance_bps = computer
        .pools
        .major
        .get(&slug)
        .map(|v| &v.base.dominance.bps.height)
        .or_else(|| {
            computer
                .pools
                .minor
                .get(&slug)
                .map(|v| &v.dominance.bps.height)
        })
        .ok_or_else(|| Error::NotFound("Pool not found".into()))?;
    let total = end - start;
    // ~200 samples regardless of range length.
    let step = (total / 200).max(1);
    // Batch read everything for the range
    let timestamps = indexer.vecs.blocks.timestamp.collect_range_at(start, end);
    let bps_values = dominance_bps.collect_range_at(start, end);
    let day1_values = computer.indexes.height.day1.collect_range_at(start, end);
    let hashrate_vec = &computer.mining.hashrate.rate.base.day1;
    // Pre-read all needed hashrates by collecting unique day1 values
    let max_day = day1_values.iter().map(|d| d.to_usize()).max().unwrap_or(0);
    let min_day = day1_values.iter().map(|d| d.to_usize()).min().unwrap_or(0);
    let hashrates = hashrate_vec.collect_range_dyn(min_day, max_day + 1);
    Ok((0..total)
        .step_by(step)
        .filter_map(|i| {
            let bps = *bps_values[i];
            let share = bps as f64 / 10000.0;
            let day_idx = day1_values[i].to_usize() - min_day;
            // Days with no hashrate datum are skipped via `?`.
            let network_hr = f64::from(*hashrates.get(day_idx)?.as_ref()?);
            Some((timestamps[i], (network_hr * share) as u128, share))
        })
        .collect())
}
}

View File

@@ -1,5 +1,6 @@
use brk_error::Result;
use brk_types::Dollars;
use brk_types::{Dollars, ExchangeRates, HistoricalPrice, HistoricalPriceEntry, Timestamp};
use vecdb::{ReadableVec, VecIndex};
use crate::Query;
@@ -18,4 +19,40 @@ impl Query {
Ok(oracle.price_dollars())
}
/// Historical BTC price.
///
/// With a `timestamp`, returns a single price entry for a block near it;
/// otherwise returns ~200 evenly spaced samples across the whole chain.
pub fn historical_price(&self, timestamp: Option<Timestamp>) -> Result<HistoricalPrice> {
    let indexer = self.indexer();
    let computer = self.computer();
    let max_height = self.height().to_usize();
    let end = max_height + 1;
    // Full-history reads on every call — a candidate for caching if this
    // endpoint gets hot.
    let timestamps = indexer.vecs.blocks.timestamp.collect();
    let all_prices = computer.prices.spot.cents.height.collect();
    let prices = if let Some(target_ts) = timestamp {
        let target = usize::from(target_ts);
        // NOTE(review): block timestamps are not strictly monotonic, so this
        // binary search is only approximate; on a miss, the insertion index
        // selects the first block at-or-after the target — confirm whether
        // "price at-or-before timestamp" was the intended semantics.
        let h = timestamps
            .binary_search_by_key(&target, |t| usize::from(*t))
            .unwrap_or_else(|i| i.min(max_height));
        vec![HistoricalPriceEntry {
            time: usize::from(timestamps[h]) as u64,
            usd: Dollars::from(all_prices[h]),
        }]
    } else {
        // ~200 samples spread over the whole chain.
        let step = (max_height / 200).max(1);
        (0..end)
            .step_by(step)
            .map(|h| HistoricalPriceEntry {
                time: usize::from(timestamps[h]) as u64,
                usd: Dollars::from(all_prices[h]),
            })
            .collect()
    };
    Ok(HistoricalPrice {
        prices,
        // Upstream response shape includes exchange rates; currently empty.
        exchange_rates: ExchangeRates {},
    })
}
}

View File

@@ -3,8 +3,8 @@ use std::io::Cursor;
use bitcoin::{consensus::Decodable, hex::DisplayHex};
use brk_error::{Error, Result};
use brk_types::{
OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut, TxOutspend, TxStatus, Txid,
TxidParam, TxidPrefix, Vin, Vout, Weight,
Height, MerkleProof, OutputType, Sats, Transaction, TxIn, TxInIndex, TxIndex, TxOut,
TxOutspend, TxStatus, Txid, TxidParam, TxidPrefix, Vin, Vout, Weight,
};
use vecdb::{ReadableVec, VecIndex};
@@ -72,6 +72,20 @@ impl Query {
})
}
pub fn transaction_raw(&self, TxidParam { txid }: TxidParam) -> Result<Vec<u8>> {
let prefix = TxidPrefix::from(&txid);
let indexer = self.indexer();
let Ok(Some(tx_index)) = indexer
.stores
.txid_prefix_to_tx_index
.get(&prefix)
.map(|opt| opt.map(|cow| cow.into_owned()))
else {
return Err(Error::UnknownTxid);
};
self.transaction_raw_by_index(tx_index)
}
pub fn transaction_hex(&self, TxidParam { txid }: TxidParam) -> Result<String> {
// First check mempool for unconfirmed transactions
if let Some(mempool) = self.mempool()
@@ -192,7 +206,6 @@ impl Query {
pub fn transaction_by_index(&self, tx_index: TxIndex) -> Result<Transaction> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
// Get tx metadata using collect_one for PcoVec, read_once for BytesVec
let txid = indexer.vecs.transactions.txid.read_once(tx_index)?;
@@ -226,7 +239,12 @@ impl Query {
.first_txin_index
.collect_one(tx_index)
.unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap();
let position = indexer
.vecs
.transactions
.position
.collect_one(tx_index)
.unwrap();
// Get block info for status
let block_hash = indexer.vecs.blocks.blockhash.read_once(height)?;
@@ -337,22 +355,15 @@ impl Query {
Ok(transaction)
}
fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> {
fn transaction_raw_by_index(&self, tx_index: TxIndex) -> Result<Vec<u8>> {
let indexer = self.indexer();
let reader = self.reader();
let computer = self.computer();
let total_size = indexer.vecs.transactions.total_size.collect_one(tx_index).unwrap();
let position = indexer.vecs.transactions.position.collect_one(tx_index).unwrap();
self.reader().read_raw_bytes(position, *total_size as usize)
}
let total_size = indexer
.vecs
.transactions
.total_size
.collect_one(tx_index)
.unwrap();
let position = computer.positions.tx.collect_one(tx_index).unwrap();
let buffer = reader.read_raw_bytes(position, *total_size as usize)?;
Ok(buffer.to_lower_hex_string())
fn transaction_hex_by_index(&self, tx_index: TxIndex) -> Result<String> {
Ok(self.transaction_raw_by_index(tx_index)?.to_lower_hex_string())
}
fn outspend_details(&self, txin_index: TxInIndex) -> Result<TxOutspend> {
@@ -407,4 +418,93 @@ impl Query {
}),
})
}
/// Resolves a txid to its confirmed `(tx_index, height)` pair.
///
/// # Errors
/// `Error::UnknownTxid` when the prefix store has no entry; store read
/// errors propagate as-is via `?`.
fn resolve_tx(&self, txid: &Txid) -> Result<(TxIndex, Height)> {
    let indexer = self.indexer();
    let prefix = TxidPrefix::from(txid);
    let tx_index: TxIndex = indexer
        .stores
        .txid_prefix_to_tx_index
        .get(&prefix)?
        .map(|cow| cow.into_owned())
        .ok_or(Error::UnknownTxid)?;
    let height: Height = indexer
        .vecs
        .transactions
        .height
        .collect_one(tx_index)
        .unwrap();
    Ok((tx_index, height))
}
/// Submits a raw transaction hex to the node (`sendrawtransaction`) and
/// returns the resulting txid.
pub fn broadcast_transaction(&self, hex: &str) -> Result<Txid> {
    self.client().send_raw_transaction(hex)
}
/// Hex-encoded `MerkleBlock` proving that `txid_param.txid` is included in
/// the block that confirmed it.
pub fn merkleblock_proof(&self, txid_param: TxidParam) -> Result<String> {
    let (_, height) = self.resolve_tx(&txid_param.txid)?;
    let header = self.read_block_header(height)?;
    let txids = self.block_txids_by_height(height)?;
    let target: bitcoin::Txid = (&txid_param.txid).into();
    let btxids: Vec<bitcoin::Txid> = txids.iter().map(bitcoin::Txid::from).collect();
    // Build a partial merkle tree matching only the requested txid.
    let mb = bitcoin::MerkleBlock::from_header_txids_with_predicate(&header, &btxids, |t| {
        *t == target
    });
    Ok(bitcoin::consensus::encode::serialize_hex(&mb))
}
/// Merkle inclusion proof for a confirmed transaction: block height,
/// sibling-hash path, and the transaction's position within its block.
pub fn merkle_proof(&self, txid_param: TxidParam) -> Result<MerkleProof> {
    let (tx_index, height) = self.resolve_tx(&txid_param.txid)?;
    let first_tx = self
        .indexer()
        .vecs
        .transactions
        .first_tx_index
        .collect_one(height)
        .ok_or(Error::NotFound("Block not found".into()))?;
    // Position within the block = global index minus the block's first index.
    let pos = tx_index.to_usize() - first_tx.to_usize();
    let txids = self.block_txids_by_height(height)?;
    Ok(MerkleProof {
        block_height: height,
        merkle: merkle_path(&txids, pos),
        pos,
    })
}
}
/// Builds the merkle inclusion path (sibling hashes, bottom-up) for the
/// transaction at index `pos` in `txids`, each hash hex-encoded in display
/// (byte-reversed) order.
fn merkle_path(txids: &[Txid], pos: usize) -> Vec<String> {
    use bitcoin::hashes::{Hash, sha256d};
    // Txid bytes are in internal order (same layout as bitcoin::Txid)
    let mut hashes: Vec<[u8; 32]> = txids
        .iter()
        .map(|t| bitcoin::Txid::from(t).to_byte_array())
        .collect();
    let mut proof = Vec::new();
    let mut idx = pos;
    while hashes.len() > 1 {
        // Sibling is the pair partner (idx ^ 1); when the level has an odd
        // count and idx is last, the node is paired with itself.
        let sibling = if idx ^ 1 < hashes.len() { idx ^ 1 } else { idx };
        // Display order: reverse bytes for hex output
        let mut display = hashes[sibling];
        display.reverse();
        proof.push(bitcoin::hex::DisplayHex::to_lower_hex_string(&display));
        // Hash pairs to form the next level; a trailing odd element is
        // concatenated with itself (`pair.last()` duplicates it).
        hashes = hashes
            .chunks(2)
            .map(|pair| {
                let right = pair.last().unwrap();
                let mut combined = [0u8; 64];
                combined[..32].copy_from_slice(&pair[0]);
                combined[32..].copy_from_slice(right);
                sha256d::Hash::hash(&combined).to_byte_array()
            })
            .collect();
        idx /= 2;
    }
    proof
}

View File

@@ -21,7 +21,7 @@ fn main() -> Result<()> {
if let Some(block) = reader.read(Some(height), Some(height)).iter().next() {
println!(
"height={} hash={} txs={} coinbase=\"{}\" ({:?})",
"height={} hash={} txs={} coinbase=\"{:?}\" ({:?})",
block.height(),
block.hash(),
block.txdata.len(),

View File

@@ -14,7 +14,7 @@ use bitcoin::{block::Header, consensus::Decodable};
use blk_index_to_blk_path::*;
use brk_error::{Error, Result};
use brk_rpc::Client;
use brk_types::{BlkMetadata, BlkPosition, BlockHash, Height, ReadBlock};
use brk_types::{BlkPosition, BlockHash, Height, ReadBlock};
pub use crossbeam::channel::Receiver;
use crossbeam::channel::bounded;
use derive_more::Deref;
@@ -24,28 +24,17 @@ use tracing::{error, warn};
mod blk_index_to_blk_path;
mod decode;
mod scan;
mod xor_bytes;
mod xor_index;
use decode::*;
use scan::*;
pub use xor_bytes::*;
pub use xor_index::*;
const MAGIC_BYTES: [u8; 4] = [249, 190, 180, 217];
const BOUND_CAP: usize = 50;
fn find_magic(bytes: &[u8], xor_i: &mut XORIndex, xor_bytes: XORBytes) -> Option<usize> {
let mut window = [0u8; 4];
for (i, &b) in bytes.iter().enumerate() {
window.rotate_left(1);
window[3] = xor_i.byte(b, xor_bytes);
if window == MAGIC_BYTES {
return Some(i + 1);
}
}
None
}
///
/// Bitcoin BLK file reader
///
@@ -117,10 +106,46 @@ impl ReaderInner {
Ok(buffer)
}
/// Returns a receiver streaming `ReadBlock`s from `hash + 1` to the chain tip.
/// If `hash` is `None`, starts from genesis.
pub fn after(&self, hash: Option<BlockHash>) -> Result<Receiver<ReadBlock>> {
    // Resolve the starting height: one past the given hash, else genesis.
    let start = if let Some(hash) = hash.as_ref() {
        let info = self.client.get_block_header_info(hash)?;
        Height::from(info.height + 1)
    } else {
        Height::ZERO
    };
    let end = self.client.get_last_height()?;
    if end < start {
        // Nothing to stream: return an already-closed receiver.
        return Ok(bounded(0).1);
    }
    if *end - *start < 10 {
        // Small catch-up: the tail-scanning reverse reader only touches the
        // last blk files, then the result is re-ordered into chain order.
        let mut blocks: Vec<_> = self.read_rev(Some(start), Some(end)).iter().collect();
        blocks.reverse();
        let (send, recv) = bounded(blocks.len());
        for block in blocks {
            let _ = send.send(block);
        }
        return Ok(recv);
    }
    // Large range: stream forward in chain order.
    Ok(self.read(Some(start), Some(end)))
}
/// Returns a crossbeam channel receiver that streams `ReadBlock`s in chain order.
///
/// Both `start` and `end` are inclusive. `None` means unbounded.
pub fn read(&self, start: Option<Height>, end: Option<Height>) -> Receiver<ReadBlock> {
if let (Some(s), Some(e)) = (start, end)
&& s > e
{
let (_, recv) = bounded(0);
return recv;
}
let client = self.client.clone();
let (send_bytes, recv_bytes) = bounded(BOUND_CAP / 2);
@@ -151,53 +176,25 @@ impl ReaderInner {
thread::spawn(move || {
let _ = blk_index_to_blk_path.range(first_blk_index..).try_for_each(
move |(blk_index, blk_path)| {
let mut xor_i = XORIndex::default();
let blk_index = *blk_index;
let Ok(mut blk_bytes_) = fs::read(blk_path) else {
let Ok(mut bytes) = fs::read(blk_path) else {
error!("Failed to read blk file: {}", blk_path.display());
return ControlFlow::Break(());
};
let blk_bytes = blk_bytes_.as_mut_slice();
let mut i = 0;
loop {
let Some(offset) = find_magic(&blk_bytes[i..], &mut xor_i, xor_bytes)
else {
break;
};
i += offset;
if i + 4 > blk_bytes.len() {
warn!("Truncated blk file {blk_index}: not enough bytes for block length at offset {i}");
break;
}
let len = u32::from_le_bytes(
xor_i
.bytes(&mut blk_bytes[i..(i + 4)], xor_bytes)
.try_into()
.unwrap(),
) as usize;
i += 4;
if i + len > blk_bytes.len() {
warn!("Truncated blk file {blk_index}: block at offset {} claims {len} bytes but only {} remain", i - 4, blk_bytes.len() - i);
break;
}
let position = BlkPosition::new(blk_index, i as u32);
let metadata = BlkMetadata::new(position, len as u32);
let block_bytes = (blk_bytes[i..(i + len)]).to_vec();
if send_bytes.send((metadata, block_bytes, xor_i)).is_err() {
return ControlFlow::Break(());
}
i += len;
xor_i.add_assign(len);
let result = scan_bytes(
&mut bytes,
*blk_index,
0,
xor_bytes,
|metadata, block_bytes, xor_i| {
if send_bytes.send((metadata, block_bytes, xor_i)).is_err() {
return ControlFlow::Break(());
}
ControlFlow::Continue(())
},
);
if result.interrupted {
return ControlFlow::Break(());
}
ControlFlow::Continue(())
},
);
@@ -288,6 +285,83 @@ impl ReaderInner {
recv_ordered
}
/// Streams `ReadBlock`s in reverse order (newest first) by scanning
/// `.blk` files from the tail. Efficient for reading recent blocks.
/// Both `start` and `end` are inclusive. `None` means unbounded.
pub fn read_rev(&self, start: Option<Height>, end: Option<Height>) -> Receiver<ReadBlock> {
    // Files are read backwards in 5 MiB chunks.
    const CHUNK: usize = 5 * 1024 * 1024;
    // Empty range: hand back a receiver whose sender is already dropped.
    if let (Some(s), Some(e)) = (start, end)
        && s > e
    {
        return bounded(0).1;
    }
    let client = self.client.clone();
    let xor_bytes = self.xor_bytes;
    // Rescan the blocks dir so newly created blk files are included.
    let paths = BlkIndexToBlkPath::scan(&self.blocks_dir);
    *self.blk_index_to_blk_path.write() = paths.clone();
    let (send, recv) = bounded(BOUND_CAP);
    thread::spawn(move || {
        // `head` carries the bytes preceding the first magic marker of the
        // chunk just scanned — i.e. the tail of a block straddling the chunk
        // boundary — so it can be appended to the next (earlier) chunk and
        // scanned as a whole.
        let mut head = Vec::new();
        for (&blk_index, path) in paths.iter().rev() {
            let file_len = fs::metadata(path).map(|m| m.len() as usize).unwrap_or(0);
            if file_len == 0 {
                continue;
            }
            let Ok(mut file) = File::open(path) else {
                return;
            };
            let mut read_end = file_len;
            while read_end > 0 {
                let read_start = read_end.saturating_sub(CHUNK);
                let chunk_len = read_end - read_start;
                read_end = read_start;
                let _ = file.seek(SeekFrom::Start(read_start as u64));
                let mut buf = vec![0u8; chunk_len + head.len()];
                if file.read_exact(&mut buf[..chunk_len]).is_err() {
                    return;
                }
                buf[chunk_len..].copy_from_slice(&head);
                head.clear();
                // Blocks are found in forward order within the chunk, then
                // emitted reversed (newest first).
                let mut blocks = Vec::new();
                let result = scan_bytes(
                    &mut buf,
                    blk_index,
                    read_start,
                    xor_bytes,
                    |metadata, bytes, xor_i| {
                        if let Ok(Some(block)) = decode_block(
                            bytes, metadata, &client, xor_i, xor_bytes, start, end, 0, 0,
                        ) {
                            blocks.push(block);
                        }
                        ControlFlow::Continue(())
                    },
                );
                for block in blocks.into_iter().rev() {
                    // Stop once the inclusive lower bound has been emitted.
                    let done = start.is_some_and(|s| block.height() <= s);
                    if send.send(block).is_err() || done {
                        return;
                    }
                }
                if read_start > 0 {
                    head = buf[..result.first_magic.unwrap_or(buf.len())].to_vec();
                }
            }
        }
    });
    recv
}
fn find_start_blk_index(
&self,
target_start: Option<Height>,
@@ -298,18 +372,6 @@ impl ReaderInner {
return Ok(0);
};
// If start is a very recent block we only look back X blk file before the last
if let Ok(height) = self.client.get_last_height()
&& (*height).saturating_sub(*target_start) <= 3
{
return Ok(blk_index_to_blk_path
.keys()
.rev()
.nth(2)
.copied()
.unwrap_or_default());
}
let blk_indices: Vec<u16> = blk_index_to_blk_path.keys().copied().collect();
if blk_indices.is_empty() {

View File

@@ -0,0 +1,73 @@
use std::ops::ControlFlow;
use brk_types::{BlkMetadata, BlkPosition};
use crate::{XORBytes, XORIndex};
const MAGIC_BYTES: [u8; 4] = [249, 190, 180, 217];
/// Finds the next occurrence of the network magic in `bytes`, XOR-decoding
/// each byte on the fly with the blk-file obfuscation key.
///
/// Returns the offset just *past* the last magic byte. `bytes` itself is not
/// mutated; `xor_i` presumably advances per byte consumed via `byte` so the
/// key stream stays aligned for the caller — TODO confirm in `XORIndex`.
pub fn find_magic(bytes: &[u8], xor_i: &mut XORIndex, xor_bytes: XORBytes) -> Option<usize> {
    // Sliding 4-byte window over the decoded stream.
    let mut window = [0u8; 4];
    for (i, &b) in bytes.iter().enumerate() {
        window.rotate_left(1);
        window[3] = xor_i.byte(b, xor_bytes);
        if window == MAGIC_BYTES {
            return Some(i + 1);
        }
    }
    None
}
pub struct ScanResult {
pub first_magic: Option<usize>,
pub interrupted: bool,
}
/// Scans `buf` for blocks. `file_offset` is the absolute position of `buf[0]` in the file.
/// Calls `on_block` for each complete block found.
///
/// Note: the 4 length bytes after each magic are XOR-decoded *in place*, so
/// `buf` is mutated; block payloads are copied out still encoded, along with
/// the `XORIndex` at their start so callers can decode them later.
pub fn scan_bytes(
    buf: &mut [u8],
    blk_index: u16,
    file_offset: usize,
    xor_bytes: XORBytes,
    mut on_block: impl FnMut(BlkMetadata, Vec<u8>, XORIndex) -> ControlFlow<()>,
) -> ScanResult {
    // Align the XOR key stream with the absolute file position.
    let mut xor_i = XORIndex::default();
    xor_i.add_assign(file_offset);
    // Buffer offset where the first magic marker begins.
    let mut first_magic = None;
    let mut i = 0;
    while let Some(off) = find_magic(&buf[i..], &mut xor_i, xor_bytes) {
        let before = i;
        i += off;
        // `i` now points just past the 4 magic bytes; record where they began.
        first_magic.get_or_insert(before + off.saturating_sub(4));
        if i + 4 > buf.len() {
            // Truncated: magic found but no room for the length field.
            break;
        }
        let len = u32::from_le_bytes(
            xor_i
                .bytes(&mut buf[i..i + 4], xor_bytes)
                .try_into()
                .unwrap(),
        ) as usize;
        i += 4;
        if i + len > buf.len() {
            // Truncated: block body extends past the end of this buffer.
            break;
        }
        let position = BlkPosition::new(blk_index, (file_offset + i) as u32);
        let metadata = BlkMetadata::new(position, len as u32);
        if on_block(metadata, buf[i..i + len].to_vec(), xor_i).is_break() {
            return ScanResult {
                first_magic,
                interrupted: true,
            };
        }
        i += len;
        xor_i.add_assign(len);
    }
    ScanResult {
        first_magic,
        interrupted: false,
    }
}

View File

@@ -240,4 +240,8 @@ impl ClientInner {
) -> Result<String> {
Ok(self.call_with_retry(|c| c.get_raw_transaction_hex(txid, block_hash))?)
}
pub fn send_raw_transaction(&self, hex: &str) -> Result<bitcoin::Txid> {
Ok(self.call_once(|c| c.send_raw_transaction(hex))?)
}
}

View File

@@ -294,6 +294,14 @@ impl ClientInner {
})?;
Ok(r)
}
pub fn send_raw_transaction(&self, hex: &str) -> Result<bitcoin::Txid> {
let hex = hex.to_string();
Ok(self.call_with_retry(|c| {
let args = [serde_json::Value::String(hex.clone())];
c.call("sendrawtransaction", &args)
})?)
}
}
// Local deserialization structs for raw RPC responses

View File

@@ -232,6 +232,10 @@ impl Client {
.get_raw_transaction_hex(txid.into(), block_hash.map(|h| h.into()))
}
pub fn send_raw_transaction(&self, hex: &str) -> Result<Txid> {
self.0.send_raw_transaction(hex).map(Txid::from)
}
/// Checks if a block is in the main chain (has positive confirmations)
pub fn is_in_main_chain(&self, hash: &BlockHash) -> Result<bool> {
let block_info = self.get_block_info(hash)?;

View File

@@ -5,8 +5,8 @@ use axum::{
};
use brk_query::BLOCK_TXS_PAGE_SIZE;
use brk_types::{
BlockHashParam, BlockHashStartIndex, BlockHashTxIndex, BlockInfo, BlockStatus, BlockTimestamp,
HeightParam, TimestampParam, Transaction, Txid,
BlockHash, BlockHashParam, BlockHashStartIndex, BlockHashTxIndex, BlockInfo, BlockInfoV1,
BlockStatus, BlockTimestamp, Height, HeightParam, Hex, TimestampParam, Transaction, Txid,
};
use crate::{CacheStrategy, extended::TransformResponseExtended};
@@ -61,6 +61,46 @@ impl BlockRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/v1/blocks",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state
.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(None))
.await
},
|op| {
op.id("get_blocks_v1")
.blocks_tag()
.summary("Recent blocks with extras")
.description("Retrieve the last 10 blocks with extended data including pool identification and fee statistics.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/v1/blocks/{height}",
get_with(
async |uri: Uri,
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.blocks_v1(Some(path.height))).await
},
|op| {
op.id("get_blocks_v1_from_height")
.blocks_tag()
.summary("Blocks from height with extras")
.description("Retrieve up to 10 blocks with extended data going backwards from the given height.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-blocks-v1)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.bad_request()
.server_error()
},
),
)
.api_route(
"/api/block-height/{height}",
get_with(
@@ -68,16 +108,16 @@ impl BlockRoutes for ApiRouter<AppState> {
headers: HeaderMap,
Path(path): Path<HeightParam>,
State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.block_by_height(path.height)).await
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_hash_by_height(path.height).map(|h| h.to_string())).await
},
|op| {
op.id("get_block_by_height")
.blocks_tag()
.summary("Block by height")
.summary("Block hash by height")
.description(
"Retrieve block information by block height. Returns block metadata including hash, timestamp, difficulty, size, weight, and transaction count.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*",
"Retrieve the block hash at a given height. Returns the hash as plain text.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-height)*",
)
.ok_response::<BlockInfo>()
.ok_response::<BlockHash>()
.not_modified()
.bad_request()
.not_found()
@@ -230,6 +270,79 @@ impl BlockRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/blocks/tip/height",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| Ok(q.height().to_string())).await
},
|op| {
op.id("get_block_tip_height")
.blocks_tag()
.summary("Block tip height")
.description("Returns the height of the last block.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-height)*")
.ok_response::<Height>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/blocks/tip/hash",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, |q| q.block_hash_by_height(q.height()).map(|h| h.to_string())).await
},
|op| {
op.id("get_block_tip_hash")
.blocks_tag()
.summary("Block tip hash")
.description("Returns the hash of the last block.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-tip-hash)*")
.ok_response::<BlockHash>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/block/{hash}/header",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_text(&headers, CacheStrategy::Height, &uri, move |q| q.block_header_hex(&path.hash)).await
},
|op| {
op.id("get_block_header")
.blocks_tag()
.summary("Block header")
.description("Returns the hex-encoded block header.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-header)*")
.ok_response::<Hex>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/block/{hash}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<BlockHashParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| {
let height = q.height_by_hash(&path.hash)?;
q.block_by_height_v1(height)
}).await
},
|op| {
op.id("get_block_v1")
.blocks_tag()
.summary("Block (v1)")
.description("Returns block details with extras by hash.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-block-v1)*")
.ok_response::<BlockInfoV1>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/blocks/timestamp/{timestamp}",
get_with(

View File

@@ -1,13 +1,14 @@
use aide::axum::{ApiRouter, routing::get_with};
use axum::{
extract::State,
extract::{Query, State},
http::{HeaderMap, Uri},
response::Redirect,
routing::get,
};
use brk_types::{Dollars, MempoolBlock, MempoolInfo, RecommendedFees, Txid};
use brk_types::{
Dollars, HistoricalPrice, MempoolBlock, MempoolInfo, MempoolRecentTx, OptionalTimestampParam,
RecommendedFees, Txid,
};
use crate::extended::TransformResponseExtended;
use crate::{CacheStrategy, extended::TransformResponseExtended};
use super::AppState;
@@ -18,9 +19,8 @@ pub trait MempoolRoutes {
impl MempoolRoutes for ApiRouter<AppState> {
fn add_mempool_routes(self) -> Self {
self
.route("/api/mempool", get(Redirect::temporary("/api#tag/mempool")))
.api_route(
"/api/mempool/info",
"/api/mempool",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.mempool_info()).await
@@ -51,6 +51,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/mempool/recent",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.mempool_recent()).await
},
|op| {
op.id("get_mempool_recent")
.mempool_tag()
.summary("Recent mempool transactions")
.description("Get the last 10 transactions to enter the mempool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mempool-recent)*")
.ok_response::<Vec<MempoolRecentTx>>()
.server_error()
},
),
)
.api_route(
"/api/mempool/price",
get_with(
@@ -87,6 +103,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/v1/fees/precise",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, state.mempool_cache(), &uri, |q| q.recommended_fees()).await
},
|op| {
op.id("get_precise_fees")
.mempool_tag()
.summary("Precise recommended fees")
.description("Get recommended fee rates with up to 3 decimal places, including sub-sat feerates.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-recommended-fees-precise)*")
.ok_response::<RecommendedFees>()
.server_error()
},
),
)
.api_route(
"/api/v1/fees/mempool-blocks",
get_with(
@@ -103,5 +135,22 @@ impl MempoolRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/v1/historical-price",
get_with(
async |uri: Uri, headers: HeaderMap, Query(params): Query<OptionalTimestampParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.historical_price(params.timestamp)).await
},
|op| {
op.id("get_historical_price")
.mempool_tag()
.summary("Historical price")
.description("Get historical BTC/USD price. Optionally specify a UNIX timestamp to get the price at that time.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-historical-price)*")
.ok_response::<HistoricalPrice>()
.not_modified()
.server_error()
},
),
)
}
}

View File

@@ -6,8 +6,9 @@ use axum::{
routing::get,
};
use brk_types::{
BlockCountParam, BlockFeesEntry, BlockRewardsEntry, BlockSizesWeights, DifficultyAdjustment,
DifficultyAdjustmentEntry, HashrateSummary, PoolDetail, PoolInfo, PoolSlugParam, PoolsSummary,
BlockCountParam, BlockFeesEntry, BlockInfoV1, BlockRewardsEntry, BlockSizesWeights,
DifficultyAdjustment, DifficultyAdjustmentEntry, HashrateSummary, PoolDetail,
PoolHashrateEntry, PoolInfo, PoolSlugAndHeightParam, PoolSlugParam, PoolsSummary,
RewardStats, TimePeriodParam,
};
@@ -95,6 +96,94 @@ impl MiningRoutes for ApiRouter<AppState> {
},
),
)
.api_route(
"/api/v1/mining/pool/{slug}/blocks",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(path.slug, None)).await
},
|op| {
op.id("get_pool_blocks")
.mining_tag()
.summary("Mining pool blocks")
.description("Get the 10 most recent blocks mined by a specific pool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/pool/{slug}/blocks/{height}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(PoolSlugAndHeightParam {slug, height}): Path<PoolSlugAndHeightParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_blocks(slug, Some(height))).await
},
|op| {
op.id("get_pool_blocks_from")
.mining_tag()
.summary("Mining pool blocks from height")
.description("Get 10 blocks mined by a specific pool before (and including) the given height.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-blocks)*")
.ok_response::<Vec<BlockInfoV1>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/pool/{slug}/hashrate",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<PoolSlugParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pool_hashrate(path.slug)).await
},
|op| {
op.id("get_pool_hashrate")
.mining_tag()
.summary("Mining pool hashrate")
.description("Get hashrate history for a specific mining pool.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrate)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.not_found()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/hashrate/pools",
get_with(
async |uri: Uri, headers: HeaderMap, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, |q| q.pools_hashrate(None)).await
},
|op| {
op.id("get_pools_hashrate")
.mining_tag()
.summary("All pools hashrate (all time)")
.description("Get hashrate data for all mining pools.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/hashrate/pools/{time_period}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(path): Path<TimePeriodParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.pools_hashrate(Some(path.time_period))).await
},
|op| {
op.id("get_pools_hashrate_by_period")
.mining_tag()
.summary("All pools hashrate")
.description("Get hashrate data for all mining pools for a time period. Valid periods: 1m, 3m, 6m, 1y, 2y, 3y\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-mining-pool-hashrates)*")
.ok_response::<Vec<PoolHashrateEntry>>()
.not_modified()
.server_error()
},
),
)
.api_route(
"/api/v1/mining/hashrate",
get_with(

View File

@@ -1,11 +1,16 @@
use aide::axum::{ApiRouter, routing::get_with};
use aide::axum::{
ApiRouter,
routing::{get_with, post_with},
};
use axum::{
extract::{Path, State},
http::{HeaderMap, Uri},
response::Redirect,
routing::get,
};
use brk_types::{Hex, Transaction, TxOutspend, TxStatus, TxidParam, TxidVout};
use axum::extract::Query;
use brk_types::{
CpfpInfo, Hex, MerkleProof, Transaction, TxOutspend, TxStatus, Txid, TxidParam, TxidVout,
TxidsParam,
};
use crate::{CacheStrategy, extended::TransformResponseExtended};
@@ -18,8 +23,6 @@ pub trait TxRoutes {
impl TxRoutes for ApiRouter<AppState> {
fn add_tx_routes(self) -> Self {
self
.route("/api/tx", get(Redirect::temporary("/api/transactions")))
.route("/api/transactions", get(Redirect::temporary("/api#tag/transactions")))
.api_route(
"/api/tx/{txid}",
get_with(
@@ -146,5 +149,92 @@ impl TxRoutes for ApiRouter<AppState> {
.server_error(),
),
)
.api_route(
"/api/tx",
post_with(
async |State(state): State<AppState>, body: String| {
let hex = body.trim().to_string();
state.sync(|q| q.broadcast_transaction(&hex))
.map(|txid| txid.to_string())
.map_err(crate::Error::from)
},
|op| {
op.id("post_tx")
.transactions_tag()
.summary("Broadcast transaction")
.description("Broadcast a raw transaction to the network. The transaction should be provided as hex in the request body. The txid will be returned on success.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#post-transaction)*")
.ok_response::<Txid>()
.bad_request()
.server_error()
},
),
)
.api_route(
"/api/tx/{txid}/raw",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_bytes(&headers, CacheStrategy::Height, &uri, move |q| q.transaction_raw(txid)).await
},
|op| op
.id("get_tx_raw")
.transactions_tag()
.summary("Transaction raw")
.description("Returns a transaction as binary data.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-raw)*")
.ok_response::<Vec<u8>>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
.api_route(
"/api/tx/{txid}/merkle-proof",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::Height, &uri, move |q| q.merkle_proof(txid)).await
},
|op| op
.id("get_tx_merkle_proof")
.transactions_tag()
.summary("Transaction merkle proof")
.description("Get the merkle inclusion proof for a transaction.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-merkle-proof)*")
.ok_response::<MerkleProof>()
.not_modified()
.bad_request()
.not_found()
.server_error(),
),
)
.api_route(
"/api/v1/cpfp/{txid}",
get_with(
async |uri: Uri, headers: HeaderMap, Path(txid): Path<TxidParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::MempoolHash(0), &uri, move |q| q.cpfp(txid)).await
},
|op| op
.id("get_cpfp")
.transactions_tag()
.summary("CPFP info")
.description("Returns ancestors and descendants for a CPFP transaction.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-children-pay-for-parent)*")
.ok_response::<CpfpInfo>()
.not_found()
.server_error(),
),
)
.api_route(
"/api/v1/transaction-times",
get_with(
async |uri: Uri, headers: HeaderMap, Query(params): Query<TxidsParam>, State(state): State<AppState>| {
state.cached_json(&headers, CacheStrategy::MempoolHash(0), &uri, move |q| q.transaction_times(&params.txids)).await
},
|op| op
.id("get_transaction_times")
.transactions_tag()
.summary("Transaction first-seen times")
.description("Returns timestamps when transactions were first seen in the mempool. Returns 0 for mined or unknown transactions.\n\n*[Mempool.space docs](https://mempool.space/docs/api/rest#get-transaction-times)*")
.ok_response::<Vec<u64>>()
.server_error(),
),
)
}
}

View File

@@ -1,3 +1,4 @@
use aide::OperationOutput;
use axum::{
http::{StatusCode, header},
response::{IntoResponse, Response},
@@ -157,6 +158,10 @@ impl From<BrkError> for Error {
}
}
impl OperationOutput for Error {
type Inner = ();
}
impl IntoResponse for Error {
fn into_response(self) -> Response {
let body = build_error_body(self.status, self.code, self.message);

View File

@@ -2,15 +2,10 @@
pub enum Kind {
Recent,
Random,
Sequential,
Vec,
}
impl Kind {
pub fn is_sequential(&self) -> bool {
matches!(*self, Self::Sequential)
}
pub fn is_recent(&self) -> bool {
matches!(*self, Self::Recent)
}

View File

@@ -137,13 +137,6 @@ where
FilterPolicyEntry::Bloom(BloomConstructionPolicy::BitsPerKey(7.0)),
]));
}
Kind::Sequential => {
options = options
.filter_block_partitioning_policy(PartitioningPolicy::all(true))
.index_block_partitioning_policy(PartitioningPolicy::all(true))
.filter_block_pinning_policy(PinningPolicy::all(false))
.index_block_pinning_policy(PinningPolicy::all(false));
}
Kind::Vec => {
options = options
.max_memtable_size(8 * 1024 * 1024)

View File

@@ -1,11 +1,9 @@
use std::borrow::Cow;
use bitcoin::hashes::{Hash, HashEngine};
use derive_more::Deref;
use crate::BlkMetadata;
use super::{BlockHash, Height};
use super::{BlockHash, CoinbaseTag, Height};
/// Raw block bytes and per-tx offsets for fast txid hashing.
/// Present when block was parsed from blk*.dat files, absent for RPC blocks.
@@ -110,15 +108,15 @@ impl Block {
bitcoin::Txid::from_engine(engine)
}
pub fn coinbase_tag(&self) -> Cow<'_, str> {
String::from_utf8_lossy(
self.txdata
.first()
.and_then(|tx| tx.input.first())
.unwrap()
.script_sig
.as_bytes(),
)
pub fn coinbase_tag(&self) -> CoinbaseTag {
let bytes = self
.txdata
.first()
.and_then(|tx| tx.input.first())
.unwrap()
.script_sig
.as_bytes();
CoinbaseTag::from(bytes)
}
}

View File

@@ -0,0 +1,109 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockPool, FeeRate, Sats, Weight};
/// Extended block data matching mempool.space /api/v1/blocks extras
///
/// Serialized field names use camelCase to mirror mempool.space's JSON.
/// A single container-level `rename_all` replaces the previous per-field
/// `#[serde(rename = ...)]` attributes: every renamed name was exactly the
/// camelCase form of its Rust field, so the wire format is unchanged and
/// new fields can no longer drift out of convention.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct BlockExtras {
    /// Total fees in satoshis
    pub total_fees: Sats,
    /// Median fee rate in sat/vB
    pub median_fee: FeeRate,
    /// Fee rate range: [min, 10%, 25%, 50%, 75%, 90%, max]
    pub fee_range: [FeeRate; 7],
    /// Total block reward (subsidy + fees) in satoshis
    pub reward: Sats,
    /// Mining pool that mined this block
    pub pool: BlockPool,
    /// Average fee per transaction in satoshis
    pub avg_fee: Sats,
    /// Average fee rate in sat/vB
    pub avg_fee_rate: FeeRate,
    /// Raw coinbase transaction scriptsig as hex
    pub coinbase_raw: String,
    /// Primary coinbase output address
    pub coinbase_address: Option<String>,
    /// All coinbase output addresses
    pub coinbase_addresses: Vec<String>,
    /// Coinbase output script in ASM format
    pub coinbase_signature: String,
    /// Coinbase scriptsig decoded as ASCII
    pub coinbase_signature_ascii: String,
    /// Average transaction size in bytes
    pub avg_tx_size: f64,
    /// Total number of inputs (excluding coinbase)
    pub total_inputs: u64,
    /// Total number of outputs
    pub total_outputs: u64,
    /// Total output amount in satoshis
    pub total_output_amt: Sats,
    /// Median fee amount in satoshis
    pub median_fee_amt: Sats,
    /// Fee amount percentiles in satoshis: [min, 10%, 25%, 50%, 75%, 90%, max]
    pub fee_percentiles: [Sats; 7],
    /// Number of segwit transactions
    pub segwit_total_txs: u32,
    /// Total size of segwit transactions in bytes
    pub segwit_total_size: u64,
    /// Total weight of segwit transactions
    pub segwit_total_weight: Weight,
    /// Raw 80-byte block header as hex
    pub header: String,
    /// UTXO set change (outputs created minus inputs spent)
    pub utxo_set_change: i64,
    /// Total UTXO set size at this height
    pub utxo_set_size: u64,
    /// Total input amount in satoshis
    pub total_input_amt: Sats,
    /// Virtual size in vbytes
    pub virtual_size: f64,
}

View File

@@ -0,0 +1,43 @@
use bitcoin::block::Header;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::BlockHash;
/// Block header matching mempool.space's format.
/// Contains the same fields as bitcoin::block::Header
/// but serialized for the JSON API.
///
/// Note: the block's own hash and height are not part of the header here;
/// they live on the containing `BlockInfo`, which flattens this struct.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockHeader {
    /// Block version, used for soft fork signaling
    pub version: u32,
    /// Previous block hash
    #[serde(rename = "previousblockhash")]
    pub previous_block_hash: BlockHash,
    /// Merkle root of the transaction tree
    pub merkle_root: String,
    /// Block timestamp as claimed by the miner (Unix time)
    pub time: u32,
    /// Compact target (bits)
    pub bits: u32,
    /// Nonce used to produce a valid block hash
    pub nonce: u32,
}
impl From<Header> for BlockHeader {
    /// Converts a consensus `bitcoin::block::Header` into the API form.
    fn from(h: Header) -> Self {
        Self {
            // `to_consensus()` yields the signed consensus encoding; the `as`
            // cast reinterprets the bit pattern (no clamping), so a negative
            // version would surface as a large u32 value.
            // NOTE(review): confirm this matches mempool.space's behavior.
            version: h.version.to_consensus() as u32,
            previous_block_hash: BlockHash::from(h.prev_blockhash),
            merkle_root: h.merkle_root.to_string(),
            time: h.time,
            // Compact difficulty target in its raw consensus (nBits) encoding.
            bits: h.bits.to_consensus(),
            nonce: h.nonce,
        }
    }
}

View File

@@ -1,9 +1,9 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockHash, Height, PoolSlug, Timestamp, Weight};
use crate::{BlockHash, BlockHeader, Height, Timestamp, Weight};
/// Block information returned by the API
/// Block information matching mempool.space /api/block/{hash}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockInfo {
/// Block hash
@@ -12,6 +12,13 @@ pub struct BlockInfo {
/// Block height
pub height: Height,
/// Block header fields
#[serde(flatten)]
pub header: BlockHeader,
/// Block timestamp (Unix time)
pub timestamp: Timestamp,
/// Number of transactions in the block
pub tx_count: u32,
@@ -21,45 +28,10 @@ pub struct BlockInfo {
/// Block weight in weight units
pub weight: Weight,
/// Block timestamp (Unix time)
pub timestamp: Timestamp,
/// Median time of the last 11 blocks
#[serde(rename = "mediantime")]
pub median_time: Timestamp,
/// Block difficulty as a floating point number
/// Block difficulty
pub difficulty: f64,
/// Extra block data (pool info, fee stats)
#[serde(skip_serializing_if = "Option::is_none")]
pub extras: Option<BlockExtras>,
}
/// Extra block data including pool identification and fee statistics
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockExtras {
/// Mining pool that mined this block
pub pool: BlockPool,
/// Total fees in satoshis
pub total_fees: u64,
/// Average fee per transaction in satoshis
pub avg_fee: u64,
/// Average fee rate in sat/vB
pub avg_fee_rate: u64,
/// Total block reward (subsidy + fees) in satoshis
pub reward: u64,
}
/// Mining pool identification for a block
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockPool {
/// Unique pool identifier
pub id: u8,
/// Pool name
pub name: String,
/// URL-friendly pool identifier
pub slug: PoolSlug,
}

View File

@@ -0,0 +1,15 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{BlockExtras, BlockInfo};
/// Block information with extras, matching mempool.space /api/v1/blocks
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockInfoV1 {
    /// Base block information
    // `flatten` merges BlockInfo's fields into this object's JSON, so the
    // serialized shape is BlockInfo plus an `extras` key.
    #[serde(flatten)]
    pub info: BlockInfo,
    /// Extended block data
    pub extras: BlockExtras,
}

View File

@@ -0,0 +1,17 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::PoolSlug;
/// Mining pool identification for a block
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct BlockPool {
    /// Unique pool identifier
    // NOTE(review): u8 caps the namespace at 256 pools — confirm this matches
    // the pool registry's id type.
    pub id: u8,
    /// Pool name
    pub name: String,
    /// URL-friendly pool identifier
    pub slug: PoolSlug,
}

View File

@@ -0,0 +1,86 @@
use derive_more::Deref;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use vecdb::{Bytes, Formattable};
/// Coinbase scriptSig tag for pool identification.
///
/// Stored as a fixed 101-byte record (1 byte length + 100 bytes data).
/// Uses `[u8; 101]` internally so that `size_of::<CoinbaseTag>()` matches
/// the serialized `Bytes::Array` size (vecdb requires this for alignment).
///
/// Bitcoin consensus limits coinbase scriptSig to 2-100 bytes.
///
/// NOTE(review): `Deref` exposes the raw length-prefixed array (byte 0 is the
/// length, padding after the data is zeroed) — use `tag_bytes()`/`as_str()`
/// for the tag content itself.
#[derive(Debug, Deref, Clone, JsonSchema)]
pub struct CoinbaseTag(#[schemars(with = "String")] [u8; 101]);
impl Bytes for CoinbaseTag {
type Array = [u8; 101];
const IS_NATIVE_LAYOUT: bool = true;
#[inline]
fn to_bytes(&self) -> Self::Array {
self.0
}
#[inline]
fn from_bytes(bytes: &[u8]) -> vecdb::Result<Self> {
let arr: [u8; 101] = bytes.try_into().map_err(|_| vecdb::Error::WrongLength {
received: bytes.len(),
expected: 101,
})?;
Ok(Self(arr))
}
}
impl CoinbaseTag {
    /// Returns the tag as a UTF-8 string (lossy).
    #[inline]
    pub fn as_str(&self) -> std::borrow::Cow<'_, str> {
        String::from_utf8_lossy(self.tag_bytes())
    }

    /// Returns the tag bytes (without length prefix).
    #[inline]
    pub fn tag_bytes(&self) -> &[u8] {
        // Byte 0 holds the length; clamp defensively to the 100-byte payload.
        let len = usize::from(self.0[0]).min(100);
        &self.0[1..=len]
    }
}
impl From<&[u8]> for CoinbaseTag {
    /// Builds a tag record from raw scriptSig bytes, keeping at most the
    /// first 100 bytes and storing the kept length in byte 0.
    #[inline]
    fn from(bytes: &[u8]) -> Self {
        let mut record = [0u8; 101];
        let len = bytes.len().min(100);
        record[0] = len as u8;
        record[1..=len].copy_from_slice(&bytes[..len]);
        Self(record)
    }
}
impl Serialize for CoinbaseTag {
    /// Serializes as the lossy UTF-8 string form (length prefix stripped);
    /// non-UTF-8 bytes become U+FFFD replacement characters.
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&self.as_str())
    }
}
impl<'de> Deserialize<'de> for CoinbaseTag {
    /// Deserializes from the string form produced by `Serialize`.
    /// Strings longer than 100 bytes are silently truncated by `From<&[u8]>`,
    /// so serialize/deserialize round-trips only for tags within the
    /// consensus-limited 100-byte size.
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let s = String::deserialize(deserializer)?;
        Ok(Self::from(s.as_bytes()))
    }
}
impl Formattable for CoinbaseTag {
    /// Writes the raw tag bytes (no length prefix, no quoting).
    fn write_to(&self, buf: &mut Vec<u8>) {
        buf.extend_from_slice(self.tag_bytes());
    }

    /// Writes the tag as a valid JSON string.
    ///
    /// Coinbase scriptSigs are arbitrary bytes, so the tag is first decoded
    /// lossily as UTF-8 (matching the `Serialize` impl) and then JSON-escaped.
    /// Emitting the raw bytes between quotes — as this previously did — yields
    /// invalid JSON whenever the tag contains `"`, `\`, control characters,
    /// or non-UTF-8 sequences.
    fn fmt_json(&self, buf: &mut Vec<u8>) {
        buf.push(b'"');
        for c in self.as_str().chars() {
            match c {
                '"' => buf.extend_from_slice(b"\\\""),
                '\\' => buf.extend_from_slice(b"\\\\"),
                '\n' => buf.extend_from_slice(b"\\n"),
                '\r' => buf.extend_from_slice(b"\\r"),
                '\t' => buf.extend_from_slice(b"\\t"),
                c if (c as u32) < 0x20 => {
                    // Remaining control characters require the \u00XX form.
                    buf.extend_from_slice(format!("\\u{:04x}", c as u32).as_bytes());
                }
                c => {
                    let mut utf8 = [0u8; 4];
                    buf.extend_from_slice(c.encode_utf8(&mut utf8).as_bytes());
                }
            }
        }
        buf.push(b'"');
    }
}

View File

@@ -0,0 +1,21 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{FeeRate, Sats, Txid, Weight};
/// CPFP (Child Pays For Parent) information for a transaction
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CpfpInfo {
    /// Ancestor transactions in the CPFP cluster
    pub ancestors: Vec<CpfpEntry>,
    /// Descendant transactions in the CPFP cluster
    pub descendants: Vec<CpfpEntry>,
    /// Effective fee rate of the cluster in sat/vB
    #[serde(rename = "effectiveFeePerVsize")]
    pub effective_fee_per_vsize: FeeRate,
}
/// A transaction in a CPFP relationship
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CpfpEntry {
    /// Transaction id
    pub txid: Txid,
    /// Transaction weight in weight units
    pub weight: Weight,
    /// Transaction fee in satoshis
    pub fee: Sats,
}

View File

@@ -0,0 +1,24 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::Dollars;
/// Historical price response
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct HistoricalPrice {
    /// Price data points
    pub prices: Vec<HistoricalPriceEntry>,
    /// Fiat exchange rates (empty here — see `ExchangeRates`)
    #[serde(rename = "exchangeRates")]
    pub exchange_rates: ExchangeRates,
}
/// A single price data point
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct HistoricalPriceEntry {
    /// Unix timestamp of the data point
    pub time: u64,
    /// BTC price in US dollars at that time
    #[serde(rename = "USD")]
    pub usd: Dollars,
}
/// Exchange rates (USD base, on-chain only — no fiat pairs available)
// Serializes as an empty JSON object; present only to keep the response
// shape compatible with mempool.space's API.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct ExchangeRates {}

View File

@@ -24,9 +24,13 @@ mod blk_metadata;
mod blk_position;
mod block;
mod block_count_param;
mod block_extras;
mod block_fee_rates_entry;
mod block_fees_entry;
mod block_header;
mod block_info;
mod block_info_v1;
mod block_pool;
mod block_rewards_entry;
mod block_size_entry;
mod block_sizes_weights;
@@ -44,6 +48,8 @@ mod cents_compact;
mod cents_sats;
mod cents_signed;
mod cents_squared_sats;
mod coinbase_tag;
mod cpfp;
mod cost_basis_bucket;
mod cost_basis_distribution;
mod cost_basis_params;
@@ -76,6 +82,7 @@ mod hashrate_entry;
mod hashrate_summary;
mod health;
mod height;
mod historical_price;
mod height_param;
mod hex;
mod hour1;
@@ -89,6 +96,8 @@ mod limit_param;
mod mempool_block;
mod mempool_entry_info;
mod mempool_info;
mod mempool_recent_tx;
mod merkle_proof;
mod minute10;
mod minute30;
mod month1;
@@ -124,6 +133,7 @@ mod percentile;
mod pool;
mod pool_detail;
mod pool_info;
mod pool_hashrate_entry;
mod pool_slug;
mod pool_slug_param;
mod pool_stats;
@@ -177,6 +187,7 @@ mod tx_with_hex;
mod txid;
mod txid_param;
mod txid_prefix;
mod txids_param;
mod txid_vout;
mod txin;
mod txin_index;
@@ -219,9 +230,13 @@ pub use blk_metadata::*;
pub use blk_position::*;
pub use block::*;
pub use block_count_param::*;
pub use block_extras::*;
pub use block_fee_rates_entry::*;
pub use block_fees_entry::*;
pub use block_header::*;
pub use block_info::*;
pub use block_info_v1::*;
pub use block_pool::*;
pub use block_rewards_entry::*;
pub use block_size_entry::*;
pub use block_sizes_weights::*;
@@ -239,6 +254,8 @@ pub use cents_compact::*;
pub use cents_sats::*;
pub use cents_signed::*;
pub use cents_squared_sats::*;
pub use coinbase_tag::*;
pub use cpfp::*;
pub use cost_basis_bucket::*;
pub use cost_basis_distribution::*;
pub use cost_basis_params::*;
@@ -271,6 +288,7 @@ pub use hashrate_entry::*;
pub use hashrate_summary::*;
pub use health::*;
pub use height::*;
pub use historical_price::*;
pub use height_param::*;
pub use hex::*;
pub use hour1::*;
@@ -284,6 +302,8 @@ pub use limit_param::*;
pub use mempool_block::*;
pub use mempool_entry_info::*;
pub use mempool_info::*;
pub use mempool_recent_tx::*;
pub use merkle_proof::*;
pub use minute10::*;
pub use minute30::*;
pub use month1::*;
@@ -319,6 +339,7 @@ pub use percentile::*;
pub use pool::*;
pub use pool_detail::*;
pub use pool_info::*;
pub use pool_hashrate_entry::*;
pub use pool_slug::*;
pub use pool_slug_param::*;
pub use pool_stats::*;
@@ -372,6 +393,7 @@ pub use tx_with_hex::*;
pub use txid::*;
pub use txid_param::*;
pub use txid_prefix::*;
pub use txids_param::*;
pub use txid_vout::*;
pub use txin::*;
pub use txin_index::*;

View File

@@ -1,9 +1,11 @@
use std::collections::BTreeMap;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::{Sats, Transaction, VSize};
use crate::{FeeRate, Sats, Transaction, VSize};
/// Mempool statistics
/// Mempool statistics with incrementally maintained fee histogram.
#[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MempoolInfo {
/// Number of transactions in the mempool
@@ -12,28 +14,50 @@ pub struct MempoolInfo {
pub vsize: VSize,
/// Total fees of all transactions in the mempool (satoshis)
pub total_fee: Sats,
/// Fee histogram: `[[fee_rate, vsize], ...]` sorted by descending fee rate
#[serde(
serialize_with = "serialize_fee_histogram",
deserialize_with = "deserialize_fee_histogram"
)]
pub fee_histogram: BTreeMap<FeeRate, VSize>,
}
impl MempoolInfo {
    /// Record a transaction entering the mempool.
    ///
    /// `fee` must be the fee reported by Bitcoin Core via `MempoolEntryInfo`,
    /// not `tx.fee`: for chained mempool transactions the prevouts cannot be
    /// resolved through `gettxout`, so `tx.fee` may be 0.
    #[inline]
    pub fn add(&mut self, tx: &Transaction, fee: Sats) {
        self.count += 1;
        self.vsize += tx.vsize();
        self.total_fee += fee;
        // Grow (or create) the histogram bucket for this transaction's fee rate.
        let bucket = FeeRate::from((fee, tx.vsize()));
        let bucket_vsize = self.fee_histogram.entry(bucket).or_insert(VSize::from(0u64));
        *bucket_vsize += tx.vsize();
    }
    /// Record a transaction leaving the mempool.
    ///
    /// `fee` must be the exact value that was passed to `add` for the same
    /// transaction, otherwise the counters and histogram drift.
    #[inline]
    pub fn remove(&mut self, tx: &Transaction, fee: Sats) {
        self.count -= 1;
        self.vsize -= tx.vsize();
        self.total_fee -= fee;
        let bucket = FeeRate::from((fee, tx.vsize()));
        if let Some(remaining) = self.fee_histogram.get_mut(&bucket) {
            *remaining -= tx.vsize();
            // Drop emptied buckets so the histogram stays sparse.
            if u64::from(*remaining) == 0 {
                self.fee_histogram.remove(&bucket);
            }
        }
    }
}
/// Serialize the histogram as `[[fee_rate, vsize], ...]`, highest fee rate first.
fn serialize_fee_histogram<S: Serializer>(
    map: &BTreeMap<FeeRate, VSize>,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    // `BTreeMap` iterates ascending; reverse to emit descending fee rates.
    let mut entries: Vec<(FeeRate, VSize)> = Vec::with_capacity(map.len());
    for (&rate, &vsize) in map.iter().rev() {
        entries.push((rate, vsize));
    }
    entries.serialize(serializer)
}
/// Deserialize `[[fee_rate, vsize], ...]` pairs back into an ordered map.
fn deserialize_fee_histogram<'de, D: Deserializer<'de>>(
    deserializer: D,
) -> Result<BTreeMap<FeeRate, VSize>, D::Error> {
    let pairs: Vec<(FeeRate, VSize)> = Vec::deserialize(deserializer)?;
    // Duplicate fee rates keep the last pair, matching `collect()` semantics.
    let mut histogram = BTreeMap::new();
    for (rate, vsize) in pairs {
        histogram.insert(rate, vsize);
    }
    Ok(histogram)
}

View File

@@ -0,0 +1,24 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{Sats, Transaction, Txid, VSize};
/// Simplified mempool transaction for the recent transactions endpoint
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MempoolRecentTx {
    /// Transaction id.
    pub txid: Txid,
    /// Transaction fee (satoshis).
    pub fee: Sats,
    /// Virtual size of the transaction.
    pub vsize: VSize,
    /// Sum of all output values (satoshis).
    pub value: Sats,
}
impl From<(&Txid, &Transaction)> for MempoolRecentTx {
fn from((txid, tx): (&Txid, &Transaction)) -> Self {
Self {
txid: txid.clone(),
fee: tx.fee,
vsize: tx.vsize(),
value: tx.output.iter().map(|o| o.value).sum(),
}
}
}

View File

@@ -0,0 +1,12 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::Height;
/// Merkle inclusion proof for a transaction
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct MerkleProof {
    /// Height of the block containing the transaction.
    pub block_height: Height,
    // NOTE(review): presumably the sibling-hash branch from the tx up to the
    // merkle root, hex-encoded — confirm against the producer.
    pub merkle: Vec<String>,
    /// Position of the transaction within the block's transaction list.
    pub pos: usize,
}

View File

@@ -1,5 +1,3 @@
use std::ops::{Add, Div};
/// Standard percentile values used throughout BRK.
pub const PERCENTILES: [u8; 19] = [
5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95,
@@ -8,31 +6,13 @@ pub const PERCENTILES: [u8; 19] = [
/// Length of the PERCENTILES array.
pub const PERCENTILES_LEN: usize = PERCENTILES.len();
/// Get a percentile value from a sorted slice.
/// Get a percentile value from a sorted slice using nearest-rank method.
///
/// # Panics
/// Panics if the slice is empty.
pub fn get_percentile<T>(sorted: &[T], percentile: f64) -> T
where
T: Clone + Div<usize, Output = T> + Add<T, Output = T>,
{
pub fn get_percentile<T: Clone>(sorted: &[T], percentile: f64) -> T {
let len = sorted.len();
if len == 0 {
panic!("Cannot get percentile from empty slice");
} else if len == 1 {
sorted[0].clone()
} else {
let index = (len - 1) as f64 * percentile;
let fract = index.fract();
if fract != 0.0 {
let left = sorted.get(index as usize).unwrap().clone();
let right = sorted.get(index.ceil() as usize).unwrap().clone();
(left + right) / 2
} else {
sorted.get(index as usize).unwrap().clone()
}
}
assert!(len > 0, "Cannot get percentile from empty slice");
let index = ((len - 1) as f64 * percentile).round() as usize;
sorted[index].clone()
}

View File

@@ -0,0 +1,19 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use super::Timestamp;
/// A single pool hashrate data point.
///
/// Serialized field names are camelCase (`avgHashrate`, `poolName`) to match
/// the public API shape.
// `Clone` added for consistency with the other response DTOs in this crate
// (e.g. `MempoolRecentTx`, `MerkleProof`), which all derive it.
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct PoolHashrateEntry {
    /// Unix timestamp.
    pub timestamp: Timestamp,
    /// Average hashrate (H/s).
    #[serde(rename = "avgHashrate")]
    pub avg_hashrate: u128,
    /// Pool's share of total network hashrate.
    pub share: f64,
    /// Pool name.
    #[serde(rename = "poolName")]
    pub pool_name: String,
}

View File

@@ -1,9 +1,15 @@
use schemars::JsonSchema;
use serde::Deserialize;
use super::PoolSlug;
use super::{Height, PoolSlug};
/// Request parameter selecting a mining pool by slug.
#[derive(Deserialize, JsonSchema)]
pub struct PoolSlugParam {
    /// Pool identifier slug.
    pub slug: PoolSlug,
}
/// Request parameters selecting a mining pool and a block height.
#[derive(Deserialize, JsonSchema)]
pub struct PoolSlugAndHeightParam {
    /// Pool identifier slug.
    pub slug: PoolSlug,
    /// Block height.
    pub height: Height,
}

View File

@@ -7,3 +7,8 @@ use crate::Timestamp;
pub struct TimestampParam {
    /// Required timestamp value.
    pub timestamp: Timestamp,
}
/// Variant of `TimestampParam` where the timestamp may be omitted.
#[derive(Deserialize, JsonSchema)]
pub struct OptionalTimestampParam {
    /// Optional timestamp value; `None` when the parameter is absent.
    pub timestamp: Option<Timestamp>,
}

View File

@@ -0,0 +1,10 @@
use schemars::JsonSchema;
use serde::Deserialize;
use crate::Txid;
/// Query parameters carrying a list of transaction ids.
#[derive(Deserialize, JsonSchema)]
pub struct TxidsParam {
    /// Transaction ids, sent as the repeated `txId[]` query key
    /// (the brackets are stripped when generating client identifiers).
    #[serde(rename = "txId[]")]
    pub txids: Vec<Txid>,
}