fix: use phys_footprint on macOS to match Activity Monitor

Switch from MACH_TASK_BASIC_INFO (resident_size) to TASK_VM_INFO
(phys_footprint) which matches Activity Monitor's Memory column.
Also: capacity-aware heap estimation, entry counts in memory payload,
heap_bytes tests for all stores.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Razvan Dimescu
2026-03-30 12:06:23 +03:00
parent e6489d4a1f
commit a5a3a0552d
10 changed files with 178 additions and 47 deletions

View File

@@ -817,13 +817,12 @@ function renderMemory(mem, stats) {
document.getElementById('memoryRss').textContent = formatBytes(mem.process_rss_bytes); document.getElementById('memoryRss').textContent = formatBytes(mem.process_rss_bytes);
document.getElementById('memorySub').textContent = 'est. ' + formatBytes(mem.total_estimated_bytes); document.getElementById('memorySub').textContent = 'est. ' + formatBytes(mem.total_estimated_bytes);
// Entry counts from sibling stats objects (avoid duplication in memory payload)
const entryCounts = { const entryCounts = {
cache: stats.cache.entries, cache: mem.cache_entries,
blocklist: stats.blocking.domains_loaded, blocklist: mem.blocklist_entries,
query_log: mem.query_log_entries, query_log: mem.query_log_entries,
srtt: mem.srtt_entries, srtt: mem.srtt_entries,
overrides: stats.overrides.active, overrides: mem.overrides_entries,
}; };
// Sidebar panel // Sidebar panel
@@ -852,7 +851,7 @@ function renderMemory(mem, stats) {
<div class="memory-bar">${barSegments}</div> <div class="memory-bar">${barSegments}</div>
${rows} ${rows}
<div class="memory-rss"> <div class="memory-rss">
<span>Process RSS</span> <span>Process Footprint</span>
<span>${formatBytes(mem.process_rss_bytes)}</span> <span>${formatBytes(mem.process_rss_bytes)}</span>
</div> </div>
`; `;

View File

@@ -214,12 +214,15 @@ struct BlockingStatsResponse {
#[derive(Serialize)] #[derive(Serialize)]
struct MemoryStats { struct MemoryStats {
cache_bytes: usize, cache_bytes: usize,
cache_entries: usize,
blocklist_bytes: usize, blocklist_bytes: usize,
blocklist_entries: usize,
query_log_bytes: usize, query_log_bytes: usize,
query_log_entries: usize, query_log_entries: usize,
srtt_bytes: usize, srtt_bytes: usize,
srtt_entries: usize, srtt_entries: usize,
overrides_bytes: usize, overrides_bytes: usize,
overrides_entries: usize,
total_estimated_bytes: usize, total_estimated_bytes: usize,
process_rss_bytes: usize, process_rss_bytes: usize,
} }
@@ -553,12 +556,15 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
}, },
memory: MemoryStats { memory: MemoryStats {
cache_bytes, cache_bytes,
cache_entries: cache_len,
blocklist_bytes, blocklist_bytes,
blocklist_entries: bl_stats.domains_loaded,
query_log_bytes, query_log_bytes,
query_log_entries, query_log_entries,
srtt_bytes, srtt_bytes,
srtt_entries, srtt_entries,
overrides_bytes, overrides_bytes,
overrides_entries: override_count,
total_estimated_bytes: total_estimated, total_estimated_bytes: total_estimated,
process_rss_bytes: crate::stats::process_rss_bytes(), process_rss_bytes: crate::stats::process_rss_bytes(),
}, },

View File

@@ -184,17 +184,13 @@ impl BlocklistStore {
} }
pub fn heap_bytes(&self) -> usize { pub fn heap_bytes(&self) -> usize {
let domains: usize = self // HashSet<String> stores (hash, String) per slot + 1 control byte
.domains let per_slot_overhead = std::mem::size_of::<u64>() + std::mem::size_of::<String>() + 1;
.iter() let domains_table = self.domains.capacity() * per_slot_overhead;
.map(|d| std::mem::size_of::<String>() + d.capacity()) let domains_heap: usize = self.domains.iter().map(|d| d.capacity()).sum();
.sum(); let allow_table = self.allowlist.capacity() * per_slot_overhead;
let allow: usize = self let allow_heap: usize = self.allowlist.iter().map(|d| d.capacity()).sum();
.allowlist domains_table + domains_heap + allow_table + allow_heap
.iter()
.map(|d| std::mem::size_of::<String>() + d.capacity())
.sum();
domains + allow
} }
pub fn stats(&self) -> BlocklistStats { pub fn stats(&self) -> BlocklistStats {
@@ -248,6 +244,23 @@ pub fn parse_blocklist(text: &str) -> HashSet<String> {
domains domains
} }
#[cfg(test)]
mod tests {
    use super::*;

    /// Loading a set of domains must raise the reported heap estimate
    /// above what an empty store reports.
    #[test]
    fn heap_bytes_grows_with_domains() {
        let mut store = BlocklistStore::new();
        let baseline = store.heap_bytes();
        let loaded: HashSet<String> = vec!["example.com", "example.org", "test.net"]
            .into_iter()
            .map(String::from)
            .collect();
        store.swap_domains(loaded, vec![]);
        assert!(store.heap_bytes() > baseline);
    }
}
pub async fn download_blocklists(lists: &[String]) -> Vec<(String, String)> { pub async fn download_blocklists(lists: &[String]) -> Vec<(String, String)> {
let client = reqwest::Client::builder() let client = reqwest::Client::builder()
.timeout(std::time::Duration::from_secs(30)) .timeout(std::time::Duration::from_secs(30))

View File

@@ -143,11 +143,21 @@ impl DnsCache {
} }
pub fn heap_bytes(&self) -> usize { pub fn heap_bytes(&self) -> usize {
let mut total = 0; // Outer HashMap<String, HashMap>: (hash, String, HashMap) per slot + control byte
let outer_slot = std::mem::size_of::<u64>()
+ std::mem::size_of::<String>()
+ std::mem::size_of::<HashMap<QueryType, CacheEntry>>()
+ 1;
let mut total = self.entries.capacity() * outer_slot;
for (domain, type_map) in &self.entries { for (domain, type_map) in &self.entries {
total += domain.capacity() + std::mem::size_of::<String>(); total += domain.capacity();
// Inner HashMap<QueryType, CacheEntry>: (hash, QueryType, CacheEntry) per slot + control byte
let inner_slot = std::mem::size_of::<u64>()
+ std::mem::size_of::<QueryType>()
+ std::mem::size_of::<CacheEntry>()
+ 1;
total += type_map.capacity() * inner_slot;
for entry in type_map.values() { for entry in type_map.values() {
total += std::mem::size_of::<CacheEntry>();
total += entry.packet.heap_bytes(); total += entry.packet.heap_bytes();
} }
} }
@@ -206,3 +216,23 @@ fn adjust_ttls(records: &mut [DnsRecord], new_ttl: u32) {
record.set_ttl(new_ttl); record.set_ttl(new_ttl);
} }
} }
#[cfg(test)]
mod tests {
    use super::*;
    use crate::packet::DnsPacket;

    /// Inserting a cached packet must raise the reported heap estimate
    /// above what an empty cache reports.
    #[test]
    fn heap_bytes_grows_with_entries() {
        let mut cache = DnsCache::new(100, 1, 3600);
        let baseline = cache.heap_bytes();

        let mut packet = DnsPacket::new();
        let answer = DnsRecord::A {
            domain: "example.com".into(),
            addr: "1.2.3.4".parse().unwrap(),
            ttl: 300,
        };
        packet.answers.push(answer);

        cache.insert("example.com", QueryType::A, &packet);
        assert!(cache.heap_bytes() > baseline);
    }
}

View File

@@ -118,16 +118,20 @@ impl OverrideStore {
} }
pub fn heap_bytes(&self) -> usize { pub fn heap_bytes(&self) -> usize {
self.entries // HashMap<String, OverrideEntry>: (hash, String, OverrideEntry) per slot + control byte
let per_slot = std::mem::size_of::<u64>()
+ std::mem::size_of::<String>()
+ std::mem::size_of::<OverrideEntry>()
+ 1;
let table = self.entries.capacity() * per_slot;
let heap: usize = self
.entries
.iter() .iter()
.map(|(k, v)| { .map(|(k, v)| {
k.capacity() k.capacity() + v.domain.capacity() + v.target.capacity() + v.record.heap_bytes()
+ std::mem::size_of::<OverrideEntry>()
+ v.domain.capacity()
+ v.target.capacity()
+ v.record.heap_bytes()
}) })
.sum() .sum();
table + heap
} }
pub fn active_count(&self) -> usize { pub fn active_count(&self) -> usize {
@@ -167,3 +171,16 @@ fn parse_target(domain: &str, target: &str, ttl: u32) -> Result<(QueryType, DnsR
}, },
)) ))
} }
#[cfg(test)]
mod tests {
    use super::*;

    /// Inserting an override must raise the reported heap estimate
    /// above what an empty store reports.
    #[test]
    fn heap_bytes_grows_with_entries() {
        let mut store = OverrideStore::new();
        let baseline = store.heap_bytes();
        store
            .insert("example.com", "1.2.3.4", 300, None)
            .expect("valid A-record override");
        assert!(store.heap_bytes() > baseline);
    }
}

View File

@@ -610,4 +610,16 @@ mod tests {
panic!("expected DNSKEY"); panic!("expected DNSKEY");
} }
} }
/// Pushing an answer record must raise the packet's heap estimate
/// above what an empty packet reports.
#[test]
fn heap_bytes_accounts_for_records() {
    let mut packet = DnsPacket::new();
    let baseline = packet.heap_bytes();
    let answer = DnsRecord::A {
        domain: "example.com".into(),
        addr: "1.2.3.4".parse().unwrap(),
        ttl: 300,
    };
    packet.answers.push(answer);
    assert!(packet.heap_bytes() > baseline);
}
} }

View File

@@ -92,3 +92,25 @@ pub struct QueryLogFilter {
pub since: Option<SystemTime>, pub since: Option<SystemTime>,
pub limit: Option<usize>, pub limit: Option<usize>,
} }
#[cfg(test)]
mod tests {
    use super::*;

    /// Pushing a log entry must raise the reported heap estimate
    /// above what an empty log reports.
    #[test]
    fn heap_bytes_grows_with_entries() {
        let mut log = QueryLog::new(100);
        let baseline = log.heap_bytes();
        let entry = QueryLogEntry {
            timestamp: SystemTime::now(),
            src_addr: "127.0.0.1:1234".parse().unwrap(),
            domain: "example.com".into(),
            query_type: QueryType::A,
            path: QueryPath::Forwarded,
            rescode: ResultCode::NOERROR,
            latency_us: 500,
            dnssec: DnssecStatus::Indeterminate,
        };
        log.push(entry);
        assert!(log.heap_bytes() > baseline);
    }
}

View File

@@ -690,4 +690,14 @@ mod tests {
let parsed = round_trip(&rec); let parsed = round_trip(&rec);
assert_eq!(rec, parsed); assert_eq!(rec, parsed);
} }
/// heap_bytes must account for at least the capacity of both owned
/// strings in a CNAME record.
///
/// Fix: the domain was built from `"a]".repeat(100)` — a stray `]`
/// that silently doubled the domain to 200 bytes. The intended bound
/// is a 100-byte domain plus a 200-byte host, i.e. >= 300 heap bytes,
/// which the assertion below now checks exactly.
#[test]
fn heap_bytes_reflects_string_capacity() {
    let rec = DnsRecord::CNAME {
        domain: "a".repeat(100), // 100 bytes of heap-owned domain
        host: "b".repeat(200),   // 200 bytes of heap-owned host
        ttl: 60,
    };
    // 100 + 200 = 300: the estimate may exceed this (enum overhead,
    // rounding), but must never undercount the string capacities.
    assert!(rec.heap_bytes() >= 300);
}
} }

View File

@@ -101,7 +101,12 @@ impl SrttCache {
} }
pub fn heap_bytes(&self) -> usize { pub fn heap_bytes(&self) -> usize {
self.entries.capacity() * (std::mem::size_of::<IpAddr>() + std::mem::size_of::<SrttEntry>()) // HashMap stores (hash, key, value) per slot + 1 control byte
let per_slot = std::mem::size_of::<u64>()
+ std::mem::size_of::<IpAddr>()
+ std::mem::size_of::<SrttEntry>()
+ 1;
self.entries.capacity() * per_slot
} }
pub fn len(&self) -> usize { pub fn len(&self) -> usize {
@@ -307,6 +312,16 @@ mod tests {
assert_eq!(addrs, vec![sock(1), sock(2)]); assert_eq!(addrs, vec![sock(1), sock(2)]);
} }
/// Recording RTT samples for fresh upstream IPs must grow the
/// capacity-based heap estimate above the empty-cache baseline.
#[test]
fn heap_bytes_grows_with_entries() {
    let mut cache = SrttCache::new(true);
    let baseline = cache.heap_bytes();
    for octet in 1..=10u8 {
        cache.record_rtt(ip(octet), 100, false);
    }
    assert!(cache.heap_bytes() > baseline);
}
#[test] #[test]
fn eviction_removes_oldest() { fn eviction_removes_oldest() {
let mut cache = SrttCache::new(true); let mut cache = SrttCache::new(true);

View File

@@ -24,33 +24,40 @@ fn macos_rss() -> usize {
fn task_info( fn task_info(
target_task: u32, target_task: u32,
flavor: u32, flavor: u32,
task_info_out: *mut libc_task_basic_info, task_info_out: *mut TaskVmInfo,
task_info_count: *mut u32, task_info_count: *mut u32,
) -> i32; ) -> i32;
} }
// Partial task_vm_info_data_t — only fields up to phys_footprint.
#[repr(C)] #[repr(C)]
struct libc_task_basic_info { struct TaskVmInfo {
virtual_size: u64, virtual_size: u64,
region_count: i32,
page_size: i32,
resident_size: u64, resident_size: u64,
resident_size_max: u64, resident_size_peak: u64,
user_time: [u32; 2], device: u64,
system_time: [u32; 2], device_peak: u64,
policy: i32, internal: u64,
suspend_count: i32, internal_peak: u64,
external: u64,
external_peak: u64,
reusable: u64,
reusable_peak: u64,
purgeable_volatile_pmap: u64,
purgeable_volatile_resident: u64,
purgeable_volatile_virtual: u64,
compressed: u64,
compressed_peak: u64,
compressed_lifetime: u64,
phys_footprint: u64,
} }
const MACH_TASK_BASIC_INFO: u32 = 20; const TASK_VM_INFO: u32 = 22;
let mut info: libc_task_basic_info = unsafe { mem::zeroed() }; let mut info: TaskVmInfo = unsafe { mem::zeroed() };
let mut count = (mem::size_of::<libc_task_basic_info>() / mem::size_of::<u32>()) as u32; let mut count = (mem::size_of::<TaskVmInfo>() / mem::size_of::<u32>()) as u32;
let kr = unsafe { let kr = unsafe { task_info(mach_task_self(), TASK_VM_INFO, &mut info, &mut count) };
task_info(
mach_task_self(),
MACH_TASK_BASIC_INFO,
&mut info,
&mut count,
)
};
if kr == 0 { if kr == 0 {
info.resident_size as usize info.phys_footprint as usize
} else { } else {
0 0
} }
@@ -61,7 +68,7 @@ fn linux_rss() -> usize {
extern "C" { extern "C" {
fn sysconf(name: i32) -> i64; fn sysconf(name: i32) -> i64;
} }
const SC_PAGESIZE: i32 = 30; const SC_PAGESIZE: i32 = 30; // x86_64 + aarch64; differs on mips (28), sparc (29)
let page_size = unsafe { sysconf(SC_PAGESIZE) }; let page_size = unsafe { sysconf(SC_PAGESIZE) };
let page_size = if page_size > 0 { let page_size = if page_size > 0 {
page_size as usize page_size as usize