This commit is contained in:
nym21
2025-12-22 16:22:09 +01:00
parent 02d635d48b
commit d30344ee3c
8 changed files with 26 additions and 478 deletions

16
Cargo.lock generated
View File

@@ -1856,18 +1856,18 @@ dependencies = [
[[package]]
name = "derive_more"
version = "2.1.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618"
checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
dependencies = [
"derive_more-impl",
]
[[package]]
name = "derive_more-impl"
version = "2.1.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b"
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
dependencies = [
"convert_case",
"proc-macro2",
@@ -3328,9 +3328,9 @@ dependencies = [
[[package]]
name = "oxc-browserslist"
version = "2.1.4"
version = "2.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bd39c45e1d6bd2abfbd4b89cbcaba34bd315cd3cee23aad623fd075acc1ea01"
checksum = "6b48a7bf4591453d69792e735a8025b2c2c33ab75e02754023284ad17cfbbe04"
dependencies = [
"bincode",
"flate2",
@@ -4636,9 +4636,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.145"
version = "1.0.146"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8"
dependencies = [
"indexmap",
"itoa",

View File

@@ -77,7 +77,7 @@ schemars = "1.1.0"
serde = "1.0.228"
serde_bytes = "0.11.19"
serde_derive = "1.0.228"
serde_json = { version = "1.0.145", features = ["float_roundtrip"] }
serde_json = { version = "1.0.146", features = ["float_roundtrip"] }
smallvec = "1.15.1"
tokio = { version = "1.48.0", features = ["rt-multi-thread"] }
vecdb = { version = "0.4.6", features = ["derive", "serde_json", "pco"] }

View File

@@ -4,9 +4,8 @@ use brk_computer::Computer;
use brk_error::Result;
use brk_fetcher::Fetcher;
use brk_indexer::Indexer;
use brk_types::TxIndex;
use mimalloc::MiMalloc;
use vecdb::{Exit, GenericStoredVec};
use vecdb::{AnyStoredVec, Exit};
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;
@@ -21,7 +20,7 @@ pub fn main() -> Result<()> {
}
fn run() -> Result<()> {
brk_logger::init(Some(Path::new(".log")))?;
brk_logger::init(None)?;
let outputs_dir = Path::new(&env::var("HOME").unwrap()).join(".brk");
// let outputs_dir = Path::new("../../_outputs");
@@ -35,62 +34,8 @@ fn run() -> Result<()> {
let computer = Computer::forced_import(&outputs_dir, &indexer, Some(fetcher))?;
let txindex = TxIndex::new(134217893);
dbg!(
indexer
.vecs
.tx
.txindex_to_txid
.read_once(txindex)
.unwrap()
.to_string()
);
let first_txinindex = indexer
.vecs
.tx
.txindex_to_first_txinindex
.read_once(txindex)?;
dbg!(first_txinindex);
let first_txoutindex = indexer
.vecs
.tx
.txindex_to_first_txoutindex
.read_once(txindex)?;
dbg!(first_txoutindex);
let input_count = *computer.indexes.txindex_to_input_count.read_once(txindex)?;
dbg!(input_count);
let output_count = *computer
.indexes
.txindex_to_output_count
.read_once(txindex)?;
dbg!(output_count);
let _ = dbg!(computer.chain.txinindex_to_value.read_once(first_txinindex));
let _ = dbg!(
computer
.chain
.txinindex_to_value
.read_once(first_txinindex + 1)
);
let _ = dbg!(
indexer
.vecs
.txout
.txoutindex_to_value
.read_once(first_txoutindex)
);
let _ = dbg!(
indexer
.vecs
.txout
.txoutindex_to_value
.read_once(first_txoutindex + 1)
);
let _ = dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
let _ = dbg!(computer.chain.txindex_to_input_value.read_once(txindex));
let _ = dbg!(computer.chain.txindex_to_output_value.read_once(txindex));
// dbg!(computer.indexes.txindex_to_txindex.ge(txindex));
let _a = dbg!(computer.chain.txinindex_to_value.region().meta());
let _b = dbg!(indexer.vecs.txout.txoutindex_to_value.region().meta());
Ok(())
}

View File

@@ -1,141 +0,0 @@
use std::{env, path::Path};
use brk_indexer::Indexer;
use brk_types::{Height, P2PKHAddressIndex, P2SHAddressIndex, TxOutIndex, TypeIndex};
use mimalloc::MiMalloc;
use vecdb::GenericStoredVec;
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;
fn main() -> color_eyre::Result<()> {
color_eyre::install()?;
let outputs_dir = Path::new(&env::var("HOME").unwrap()).join(".brk");
let indexer = Indexer::forced_import(&outputs_dir)?;
let reader_outputtype = indexer.vecs.txout.txoutindex_to_outputtype.create_reader();
let reader_typeindex = indexer.vecs.txout.txoutindex_to_typeindex.create_reader();
let reader_txindex = indexer.vecs.txout.txoutindex_to_txindex.create_reader();
let reader_txid = indexer.vecs.tx.txindex_to_txid.create_reader();
let reader_height_to_first_txoutindex = indexer
.vecs
.txout
.height_to_first_txoutindex
.create_reader();
let reader_p2pkh = indexer
.vecs
.address
.p2pkhaddressindex_to_p2pkhbytes
.create_reader();
let reader_p2sh = indexer
.vecs
.address
.p2shaddressindex_to_p2shbytes
.create_reader();
// Check what's stored at typeindex 254909199 in both P2PKH and P2SH vecs
let typeindex = TypeIndex::from(254909199_usize);
let p2pkh_bytes = indexer
.vecs
.address
.p2pkhaddressindex_to_p2pkhbytes
.read(P2PKHAddressIndex::from(typeindex), &reader_p2pkh);
println!("P2PKH at typeindex 254909199: {:?}", p2pkh_bytes);
let p2sh_bytes = indexer
.vecs
.address
.p2shaddressindex_to_p2shbytes
.read(P2SHAddressIndex::from(typeindex), &reader_p2sh);
println!("P2SH at typeindex 254909199: {:?}", p2sh_bytes);
// Check first P2SH index at height 476152
let reader_first_p2sh = indexer
.vecs
.address
.height_to_first_p2shaddressindex
.create_reader();
let reader_first_p2pkh = indexer
.vecs
.address
.height_to_first_p2pkhaddressindex
.create_reader();
let first_p2sh_at_476152 = indexer
.vecs
.address
.height_to_first_p2shaddressindex
.read(Height::from(476152_usize), &reader_first_p2sh);
let first_p2pkh_at_476152 = indexer
.vecs
.address
.height_to_first_p2pkhaddressindex
.read(Height::from(476152_usize), &reader_first_p2pkh);
println!(
"First P2SH index at height 476152: {:?}",
first_p2sh_at_476152
);
println!(
"First P2PKH index at height 476152: {:?}",
first_p2pkh_at_476152
);
// Check the problematic txoutindexes found during debugging
for txoutindex_usize in [653399433_usize, 653399443_usize] {
let txoutindex = TxOutIndex::from(txoutindex_usize);
let outputtype = indexer
.vecs
.txout
.txoutindex_to_outputtype
.read(txoutindex, &reader_outputtype)
.unwrap();
let typeindex = indexer
.vecs
.txout
.txoutindex_to_typeindex
.read(txoutindex, &reader_typeindex)
.unwrap();
let txindex = indexer
.vecs
.txout
.txoutindex_to_txindex
.read(txoutindex, &reader_txindex)
.unwrap();
let txid = indexer
.vecs
.tx
.txindex_to_txid
.read(txindex, &reader_txid)
.unwrap();
// Find height by linear scan: last height whose first txoutindex does not exceed the target
let mut height = Height::from(0_usize);
for h in 0..900_000_usize {
let first_txoutindex = indexer
.vecs
.txout
.height_to_first_txoutindex
.read(Height::from(h), &reader_height_to_first_txoutindex);
if let Ok(first) = first_txoutindex {
if usize::from(first) > txoutindex_usize {
break;
}
height = Height::from(h);
}
}
println!(
"txoutindex={}, outputtype={:?}, typeindex={}, txindex={}, txid={}, height={}",
txoutindex_usize,
outputtype,
usize::from(typeindex),
usize::from(txindex),
txid,
usize::from(height)
);
}
Ok(())
}

View File

@@ -1,121 +0,0 @@
use std::{collections::BTreeMap, path::Path, thread};
use brk_computer::Computer;
use brk_error::Result;
use brk_fetcher::Fetcher;
use brk_indexer::Indexer;
use brk_types::{Address, AddressBytes, OutputType, TxOutIndex, pools};
use vecdb::{Exit, IterableVec, TypedVecIterator};
fn main() -> Result<()> {
brk_logger::init(Some(Path::new(".log")))?;
let exit = Exit::new();
exit.set_ctrlc_handler();
thread::Builder::new()
.stack_size(256 * 1024 * 1024)
.spawn(move || -> Result<()> {
let outputs_dir = Path::new(&std::env::var("HOME").unwrap()).join(".brk");
let indexer = Indexer::forced_import(&outputs_dir)?;
let fetcher = Fetcher::import(true, None)?;
let computer = Computer::forced_import(&outputs_dir, &indexer, Some(fetcher))?;
let pools = pools();
let mut res: BTreeMap<&'static str, usize> = BTreeMap::default();
let vecs = indexer.vecs;
let stores = indexer.stores;
let mut height_to_first_txindex_iter = vecs.tx.height_to_first_txindex.iter()?;
let mut txindex_to_first_txoutindex_iter = vecs.tx.txindex_to_first_txoutindex.iter()?;
let mut txindex_to_output_count_iter = computer.indexes.txindex_to_output_count.iter();
let mut txoutindex_to_outputtype_iter = vecs.txout.txoutindex_to_outputtype.iter()?;
let mut txoutindex_to_typeindex_iter = vecs.txout.txoutindex_to_typeindex.iter()?;
let mut p2pk65addressindex_to_p2pk65bytes_iter =
vecs.address.p2pk65addressindex_to_p2pk65bytes.iter()?;
let mut p2pk33addressindex_to_p2pk33bytes_iter =
vecs.address.p2pk33addressindex_to_p2pk33bytes.iter()?;
let mut p2pkhaddressindex_to_p2pkhbytes_iter =
vecs.address.p2pkhaddressindex_to_p2pkhbytes.iter()?;
let mut p2shaddressindex_to_p2shbytes_iter =
vecs.address.p2shaddressindex_to_p2shbytes.iter()?;
let mut p2wpkhaddressindex_to_p2wpkhbytes_iter =
vecs.address.p2wpkhaddressindex_to_p2wpkhbytes.iter()?;
let mut p2wshaddressindex_to_p2wshbytes_iter =
vecs.address.p2wshaddressindex_to_p2wshbytes.iter()?;
let mut p2traddressindex_to_p2trbytes_iter =
vecs.address.p2traddressindex_to_p2trbytes.iter()?;
let mut p2aaddressindex_to_p2abytes_iter = vecs.address.p2aaddressindex_to_p2abytes.iter()?;
let unknown = pools.get_unknown();
stores
.height_to_coinbase_tag
.iter()
.for_each(|(height, coinbase_tag)| {
let txindex = height_to_first_txindex_iter.get_unwrap(height);
let txoutindex = txindex_to_first_txoutindex_iter.get_unwrap(txindex);
let outputcount = txindex_to_output_count_iter.get_unwrap(txindex);
let pool = (*txoutindex..(*txoutindex + *outputcount))
.map(TxOutIndex::from)
.find_map(|txoutindex| {
let outputtype = txoutindex_to_outputtype_iter.get_unwrap(txoutindex);
let typeindex = txoutindex_to_typeindex_iter.get_unwrap(txoutindex);
match outputtype {
OutputType::P2PK65 => Some(AddressBytes::from(
p2pk65addressindex_to_p2pk65bytes_iter
.get_unwrap(typeindex.into()),
)),
OutputType::P2PK33 => Some(AddressBytes::from(
p2pk33addressindex_to_p2pk33bytes_iter
.get_unwrap(typeindex.into()),
)),
OutputType::P2PKH => Some(AddressBytes::from(
p2pkhaddressindex_to_p2pkhbytes_iter
.get_unwrap(typeindex.into()),
)),
OutputType::P2SH => Some(AddressBytes::from(
p2shaddressindex_to_p2shbytes_iter.get_unwrap(typeindex.into()),
)),
OutputType::P2WPKH => Some(AddressBytes::from(
p2wpkhaddressindex_to_p2wpkhbytes_iter
.get_unwrap(typeindex.into()),
)),
OutputType::P2WSH => Some(AddressBytes::from(
p2wshaddressindex_to_p2wshbytes_iter
.get_unwrap(typeindex.into()),
)),
OutputType::P2TR => Some(AddressBytes::from(
p2traddressindex_to_p2trbytes_iter.get_unwrap(typeindex.into()),
)),
OutputType::P2A => Some(AddressBytes::from(
p2aaddressindex_to_p2abytes_iter.get_unwrap(typeindex.into()),
)),
_ => None,
}
.map(|bytes| Address::try_from(&bytes).unwrap())
.and_then(|address| pools.find_from_address(&address))
})
.or_else(|| pools.find_from_coinbase_tag(&coinbase_tag))
.unwrap_or(unknown);
*res.entry(pool.name).or_default() += 1;
});
let mut v = res.into_iter().map(|(k, v)| (v, k)).collect::<Vec<_>>();
v.sort_unstable();
println!("{:#?}", v);
println!("{:#?}", v.len());
Ok(())
})?
.join()
.unwrap()
}

View File

@@ -6,13 +6,11 @@ use crate::types::PoolIndex;
/// Entry in the priority heap for transaction selection.
///
/// Stores a snapshot of the score at insertion time.
/// The generation field detects stale entries after ancestor updates.
#[derive(Clone, Copy)]
pub struct HeapEntry {
pub pool_index: PoolIndex,
ancestor_fee: Sats,
ancestor_vsize: VSize,
pub generation: u32,
}
impl HeapEntry {
@@ -21,23 +19,18 @@ impl HeapEntry {
pool_index: node.pool_index,
ancestor_fee: node.ancestor_fee,
ancestor_vsize: node.ancestor_vsize,
generation: node.generation,
}
}
/// Returns true if this entry is outdated.
#[inline]
pub fn is_stale(&self, node: &TxNode) -> bool {
self.generation != node.generation
}
/// Compare fee rates: self > other?
#[inline]
fn has_higher_fee_rate_than(&self, other: &Self) -> bool {
// Cross multiply to avoid division:
// fee_a/vsize_a > fee_b/vsize_b ⟺ fee_a * vsize_b > fee_b * vsize_a
let self_score = u64::from(self.ancestor_fee) as u128 * u64::from(other.ancestor_vsize) as u128;
let other_score = u64::from(other.ancestor_fee) as u128 * u64::from(self.ancestor_vsize) as u128;
let self_score =
u64::from(self.ancestor_fee) as u128 * u64::from(other.ancestor_vsize) as u128;
let other_score =
u64::from(other.ancestor_fee) as u128 * u64::from(self.ancestor_vsize) as u128;
self_score > other_score
}
}

View File

@@ -4,10 +4,10 @@ use brk_types::FeeRate;
use rustc_hash::FxHashSet;
use smallvec::SmallVec;
use super::BLOCK_VSIZE;
use super::graph::Graph;
use super::heap_entry::HeapEntry;
use super::package::Package;
use super::BLOCK_VSIZE;
use crate::types::PoolIndex;
/// Select transactions from the graph and group into CPFP packages.
@@ -25,7 +25,7 @@ pub fn select_packages(graph: &mut Graph, num_blocks: usize) -> Vec<Package> {
let node = &graph[entry.pool_index];
// Skip if already selected or entry is stale
if node.selected || entry.is_stale(node) {
if node.selected {
continue;
}
@@ -80,13 +80,18 @@ fn select_with_ancestors(graph: &mut Graph, pool_idx: PoolIndex) -> SmallVec<[Po
}
/// Update descendants' ancestor scores after selecting a tx.
fn update_descendants(graph: &mut Graph, selected_idx: PoolIndex, heap: &mut BinaryHeap<HeapEntry>) {
fn update_descendants(
graph: &mut Graph,
selected_idx: PoolIndex,
heap: &mut BinaryHeap<HeapEntry>,
) {
let selected_fee = graph[selected_idx].fee;
let selected_vsize = graph[selected_idx].vsize;
// Track visited to avoid double-updates in diamond patterns
let mut visited: FxHashSet<PoolIndex> = FxHashSet::default();
let mut stack: SmallVec<[PoolIndex; 16]> = graph[selected_idx].children.iter().copied().collect();
let mut stack: SmallVec<[PoolIndex; 16]> =
graph[selected_idx].children.iter().copied().collect();
while let Some(child_idx) = stack.pop() {
if !visited.insert(child_idx) {

View File

@@ -1,133 +0,0 @@
# Mempool.space API Compatibility - Implementation Status
Plan file: `/Users/k/.claude/plans/smooth-weaving-crayon.md`
## Completed Endpoints
| Endpoint | Path | Notes |
|----------|------|-------|
| GET Block | `/api/block/{hash}` | |
| GET Block Height | `/api/block-height/{height}` | Returns plain text hash |
| GET Block Status | `/api/block/{hash}/status` | |
| GET Block Txids | `/api/block/{hash}/txids` | |
| GET Blocks | `/api/blocks[/:start_height]` | Last 10 blocks |
| GET Transaction | `/api/tx/{txid}` | |
| GET Tx Status | `/api/tx/{txid}/status` | |
| GET Tx Hex | `/api/tx/{txid}/hex` | Returns plain text |
| GET Address | `/api/address/{address}` | |
| GET Address Txs | `/api/address/{address}/txs` | |
| GET Address UTXOs | `/api/address/{address}/utxo` | |
| GET Mempool Info | `/api/mempool/info` | |
| GET Mempool Txids | `/api/mempool/txids` | |
| GET Recommended Fees | `/api/v1/fees/recommended` | Basic impl, needs optimization |
## Remaining Endpoints
### Mempool/Fees (4)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 1 | Optimize projected blocks | - | CPFP/ancestor scores | HIGH |
| 2 | GET Mempool Blocks | `/api/v1/fees/mempool-blocks` | #1 | HIGH |
| 3 | GET Mempool Recent | `/api/mempool/recent` | | MED |
| 4 | GET RBF Replacements | `/api/v1/replacements` | RBF tracking in brk_monitor | LOW |
### Blocks (4)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 5 | GET Block Txs | `/api/block/{hash}/txs[/:start_index]` | | MED |
| 6 | GET Block Txid at Index | `/api/block/{hash}/txid/{index}` | | LOW |
| 7 | GET Block Raw | `/api/block/{hash}/raw` | brk_reader | LOW |
| 8 | GET Block by Timestamp | `/api/v1/mining/blocks/timestamp/{timestamp}` | Binary search | LOW |
### Addresses (3)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 9 | GET Address Txs Chain | `/api/address/{address}/txs/chain[/:after_txid]` | | MED |
| 10 | GET Address Txs Mempool | `/api/address/{address}/txs/mempool` | brk_monitor | MED |
| 11 | GET Validate Address | `/api/v1/validate-address/{address}` | | LOW |
### Transactions (4)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 12 | GET Tx Outspend | `/api/tx/{txid}/outspend/{vout}` | #27 txoutindex_to_txinindex | HIGH |
| 13 | GET Tx Outspends | `/api/tx/{txid}/outspends` | #27 | HIGH |
| 14 | GET Tx Merkle Proof | `/api/tx/{txid}/merkle-proof` | | LOW |
| 15 | POST Tx Broadcast | `/api/tx` | brk_rpc | MED |
### General (1)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 16 | GET Difficulty Adjustment | `/api/v1/difficulty-adjustment` | | MED |
### Mining (9)
| # | Endpoint | Path | Dependencies | Priority |
|---|----------|------|--------------|----------|
| 17 | GET Mining Pools | `/api/v1/mining/pools[/:timePeriod]` | #28 pool identification | LOW |
| 18 | GET Mining Pool | `/api/v1/mining/pool/{slug}` | #28 | LOW |
| 19 | GET Hashrate | `/api/v1/mining/hashrate[/:timePeriod]` | | MED |
| 20 | GET Difficulty Adjustments | `/api/v1/mining/difficulty-adjustments[/:interval]` | | LOW |
| 21 | GET Reward Stats | `/api/v1/mining/reward-stats/{blockCount}` | | LOW |
| 22 | GET Block Fees | `/api/v1/mining/blocks/fees/{timePeriod}` | | LOW |
| 23 | GET Block Rewards | `/api/v1/mining/blocks/rewards/{timePeriod}` | | LOW |
| 24 | GET Block Fee Rates | `/api/v1/mining/blocks/fee-rates/{timePeriod}` | | LOW |
| 25 | GET Block Sizes/Weights | `/api/v1/mining/blocks/sizes-weights/{timePeriod}` | | LOW |
### Infrastructure (3)
| # | Task | Location | Priority |
|---|------|----------|----------|
| 26 | Index txindex_to_sigop_cost | brk_indexer | MED |
| 27 | Add txoutindex_to_txinindex mapping | brk_computer/stateful | HIGH |
| 28 | Pool identification from coinbase | brk_computer | LOW |
## Priority Order
### Phase 1: Core Functionality (HIGH)
1. **#27** Add txoutindex_to_txinindex mapping (enables outspend lookups)
2. **#12** GET Tx Outspend
3. **#13** GET Tx Outspends
4. **#1** Optimize projected blocks (CPFP/ancestor scores)
5. **#2** GET Mempool Blocks
### Phase 2: Essential Features (MED)
6. **#15** POST Tx Broadcast
7. **#16** GET Difficulty Adjustment
8. **#5** GET Block Txs (paginated)
9. **#9** GET Address Txs Chain
10. **#10** GET Address Txs Mempool
11. **#19** GET Hashrate
12. **#26** Index txindex_to_sigop_cost
13. **#3** GET Mempool Recent
### Phase 3: Nice to Have (LOW)
14. **#6** GET Block Txid at Index
15. **#7** GET Block Raw
16. **#8** GET Block by Timestamp
17. **#11** GET Validate Address
18. **#14** GET Tx Merkle Proof
19. **#4** GET RBF Replacements
20. **#20** GET Difficulty Adjustments
21. **#21** GET Reward Stats
22. **#22-25** Mining block statistics
23. **#17-18** Mining pools (requires #28)
24. **#28** Pool identification
## Design Documents
- Mempool projected blocks: `crates/brk_monitor/src/mempool/DESIGN.md`
## Skipped Endpoints
| Endpoint | Reason |
|----------|--------|
| GET Price (`/api/v1/prices`) | External data source needed |
| GET Historical Price (`/api/v1/historical-price`) | External data source needed |
| GET Full-RBF Replacements (`/api/v1/fullrbf/replacements`) | Low priority |
| Lightning endpoints | Requires separate Lightning indexing |
| Accelerator endpoints | mempool.space-specific paid service |