Compare commits
59 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ebfc31d793 | ||
|
|
b6703b4315 | ||
|
|
cc8d3c7a83 | ||
|
|
4dec0c89b5 | ||
|
|
ea840f5a07 | ||
|
|
df2856b57f | ||
|
|
236ef7b4f5 | ||
|
|
5d454cbed5 | ||
|
|
c1d425069f | ||
|
|
d274500308 | ||
|
|
9c313ef06a | ||
|
|
0d25fae4cf | ||
|
|
1ae2e23bb6 | ||
|
|
fe784addd2 | ||
|
|
a3a218ba5e | ||
|
|
e4594c7955 | ||
|
|
b85f599b8f | ||
|
|
03c164e339 | ||
|
|
2fce82e36c | ||
|
|
53ae4d1404 | ||
|
|
4748a4a4bb | ||
|
|
607470472d | ||
|
|
0dd7700665 | ||
|
|
dddc10336c | ||
|
|
4e723e8ee7 | ||
|
|
03ca0bcb28 | ||
|
|
c021d5a0c8 | ||
|
|
ed12659b26 | ||
|
|
eaab406515 | ||
|
|
9992418908 | ||
|
|
0a43feaf1a | ||
|
|
1bf11190d5 | ||
|
|
4f8afcd5b2 | ||
|
|
71cf0f0fc5 | ||
|
|
2b64e30bf7 | ||
|
|
4a1c98b02d | ||
|
|
55ea49b003 | ||
|
|
f01b2418cd | ||
|
|
32bff69113 | ||
|
|
0a39d98861 | ||
|
|
ca1f51652b | ||
|
|
a74d9a4bbb | ||
|
|
e564bd887e | ||
|
|
8bece0a0cd | ||
|
|
990c865f41 | ||
|
|
0ba2d3c72d | ||
|
|
def89ffe59 | ||
|
|
a29e4aeb96 | ||
|
|
d355f8d005 | ||
|
|
c410945222 | ||
|
|
b3f3a4f36c | ||
|
|
14b035387b | ||
|
|
d457ffc296 | ||
|
|
8ab50844c2 | ||
|
|
e04afe5b70 | ||
|
|
44113492f0 | ||
|
|
ec41f32d4e | ||
|
|
a35b0ea23c | ||
|
|
fbdb0a245f |
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@@ -27,17 +27,6 @@ jobs:
|
||||
- name: audit
|
||||
run: cargo install cargo-audit && cargo audit
|
||||
|
||||
check-macos:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: clippy
|
||||
run: cargo clippy -- -D warnings
|
||||
- name: test
|
||||
run: cargo test
|
||||
|
||||
check-windows:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
@@ -48,10 +37,3 @@ jobs:
|
||||
run: cargo build
|
||||
- name: clippy
|
||||
run: cargo clippy -- -D warnings
|
||||
- name: test
|
||||
run: cargo test
|
||||
- name: Upload binary
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: numa-windows-x86_64
|
||||
path: target/debug/numa.exe
|
||||
|
||||
76
.github/workflows/homebrew-bump.yml
vendored
76
.github/workflows/homebrew-bump.yml
vendored
@@ -1,76 +0,0 @@
|
||||
name: Bump Homebrew Tap
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to bump (e.g. 0.10.0 or v0.10.0)'
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
bump:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Determine version
|
||||
id: ver
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" = "release" ]; then
|
||||
V="${{ github.event.release.tag_name }}"
|
||||
else
|
||||
V="${{ github.event.inputs.version }}"
|
||||
fi
|
||||
V="${V#v}"
|
||||
echo "version=$V" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Fetch sha256 checksums from release assets
|
||||
id: shas
|
||||
env:
|
||||
V: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
base="https://github.com/razvandimescu/numa/releases/download/v${V}"
|
||||
for t in macos-aarch64 macos-x86_64 linux-aarch64 linux-x86_64; do
|
||||
sha=$(curl -fsSL "${base}/numa-${t}.tar.gz.sha256" | awk '{print $1}')
|
||||
if [ -z "$sha" ]; then
|
||||
echo "ERROR: failed to fetch sha256 for $t" >&2
|
||||
exit 1
|
||||
fi
|
||||
key=$(echo "$t" | tr '[:lower:]-' '[:upper:]_')
|
||||
echo "SHA_${key}=${sha}" >> "$GITHUB_ENV"
|
||||
done
|
||||
|
||||
- name: Clone homebrew-tap
|
||||
env:
|
||||
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
||||
run: |
|
||||
git clone "https://x-access-token:${HOMEBREW_TAP_GITHUB_TOKEN}@github.com/razvandimescu/homebrew-tap.git" tap
|
||||
|
||||
- name: Update formula
|
||||
env:
|
||||
VERSION: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
python3 scripts/update-homebrew-formula.py tap/numa.rb
|
||||
echo "--- updated numa.rb ---"
|
||||
cat tap/numa.rb
|
||||
|
||||
- name: Commit and push
|
||||
working-directory: tap
|
||||
env:
|
||||
V: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
if git diff --quiet; then
|
||||
echo "numa.rb already at v${V}, nothing to commit"
|
||||
exit 0
|
||||
fi
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git add numa.rb
|
||||
git commit -m "chore: bump numa to v${V}"
|
||||
git push origin main
|
||||
12
Cargo.lock
generated
12
Cargo.lock
generated
@@ -1143,7 +1143,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "numa"
|
||||
version = "0.10.1"
|
||||
version = "0.6.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"axum",
|
||||
@@ -1159,7 +1159,6 @@ dependencies = [
|
||||
"reqwest",
|
||||
"ring",
|
||||
"rustls",
|
||||
"rustls-pemfile",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"socket2 0.5.10",
|
||||
@@ -1547,15 +1546,6 @@ dependencies = [
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustls-pemfile"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
|
||||
dependencies = [
|
||||
"rustls-pki-types",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustls-pki-types"
|
||||
version = "1.14.0"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "numa"
|
||||
version = "0.10.1"
|
||||
version = "0.6.0"
|
||||
authors = ["razvandimescu <razvan@dimescu.com>"]
|
||||
edition = "2021"
|
||||
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||
@@ -10,7 +10,7 @@ keywords = ["dns", "dns-server", "ad-blocking", "reverse-proxy", "developer-tool
|
||||
categories = ["network-programming", "development-tools"]
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time", "sync"] }
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time"] }
|
||||
axum = "0.8"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
@@ -29,7 +29,6 @@ rustls = "0.23"
|
||||
tokio-rustls = "0.26"
|
||||
arc-swap = "1"
|
||||
ring = "0.17"
|
||||
rustls-pemfile = "2.2.0"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
|
||||
@@ -13,5 +13,5 @@ RUN cargo build --release
|
||||
|
||||
FROM alpine:3.20
|
||||
COPY --from=builder /app/target/release/numa /usr/local/bin/numa
|
||||
EXPOSE 53/udp 80/tcp 443/tcp 853/tcp 5380/tcp
|
||||
EXPOSE 53/udp 80/tcp 443/tcp 5380/tcp
|
||||
ENTRYPOINT ["numa"]
|
||||
|
||||
8
Makefile
8
Makefile
@@ -1,4 +1,4 @@
|
||||
.PHONY: all build lint fmt check audit test coverage bench clean deploy blog release
|
||||
.PHONY: all build lint fmt check audit test coverage bench clean deploy blog
|
||||
|
||||
all: lint build test
|
||||
|
||||
@@ -33,12 +33,6 @@ blog:
|
||||
echo " $$f → site/blog/posts/$$name.html"; \
|
||||
done
|
||||
|
||||
release:
|
||||
ifndef VERSION
|
||||
$(error Usage: make release VERSION=0.8.0)
|
||||
endif
|
||||
./scripts/release.sh $(VERSION)
|
||||
|
||||
clean:
|
||||
cargo clean
|
||||
|
||||
|
||||
189
README.md
189
README.md
@@ -8,127 +8,166 @@
|
||||
|
||||
A portable DNS resolver in a single binary. Block ads on any network, name your local services (`frontend.numa`), and override any hostname with auto-revert — all from your laptop, no cloud account or Raspberry Pi required.
|
||||
|
||||
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand. Caching, ad blocking, and local service domains out of the box. Optional recursive resolution from root nameservers with full DNSSEC chain-of-trust validation, plus a DNS-over-TLS listener for encrypted client connections (iOS Private DNS, systemd-resolved, etc.). One ~8MB binary, everything embedded.
|
||||
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand. Recursive resolution from root nameservers with full DNSSEC validation (chain-of-trust + NSEC/NSEC3 denial proofs). One ~8MB binary, no PHP, no web server, no database — everything is embedded.
|
||||
|
||||

|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# macOS
|
||||
# Install (pick one)
|
||||
brew install razvandimescu/tap/numa
|
||||
|
||||
# Linux
|
||||
cargo install numa
|
||||
curl -fsSL https://raw.githubusercontent.com/razvandimescu/numa/main/install.sh | sh
|
||||
|
||||
# Windows — download from GitHub Releases
|
||||
# All platforms
|
||||
cargo install numa
|
||||
```
|
||||
# Run (port 53 requires root)
|
||||
sudo numa
|
||||
|
||||
```bash
|
||||
sudo numa # run in foreground (port 53 requires root/admin)
|
||||
# Try it
|
||||
dig @127.0.0.1 google.com # ✓ resolves normally
|
||||
dig @127.0.0.1 ads.google.com # ✗ blocked → 0.0.0.0
|
||||
```
|
||||
|
||||
Open the dashboard: **http://numa.numa** (or `http://localhost:5380`)
|
||||
|
||||
Set as system DNS:
|
||||
Or build from source:
|
||||
```bash
|
||||
git clone https://github.com/razvandimescu/numa.git && cd numa
|
||||
cargo build --release
|
||||
sudo ./target/release/numa
|
||||
```
|
||||
|
||||
| Platform | Install | Uninstall |
|
||||
|----------|---------|-----------|
|
||||
| macOS | `sudo numa install` | `sudo numa uninstall` |
|
||||
| Linux | `sudo numa install` | `sudo numa uninstall` |
|
||||
| Windows | `numa install` (admin) + reboot | `numa uninstall` (admin) + reboot |
|
||||
## Why Numa
|
||||
|
||||
On macOS and Linux, numa runs as a system service (launchd/systemd). On Windows, numa auto-starts on login via registry.
|
||||
- **Local service proxy** — `https://frontend.numa` instead of `localhost:5173`. Auto-generated TLS certs, WebSocket support for HMR. Like `/etc/hosts` but with auto TLS, a REST API, LAN discovery, and auto-revert.
|
||||
- **Path-based routing** — `app.numa/api → :5001`, `app.numa/auth → :5002`. Route URL paths to different backends with optional prefix stripping. Like nginx location blocks, zero config files.
|
||||
- **LAN service discovery** — Numa instances on the same network find each other automatically via mDNS. Access a teammate's `api.numa` from your machine. Opt-in via `[lan] enabled = true`.
|
||||
- **Developer overrides** — point any hostname to any IP, auto-reverts after N minutes. Full REST API for scripting. Built-in diagnostics: `curl localhost:5380/diagnose/example.com` tells you exactly how any domain resolves.
|
||||
- **DNS-over-HTTPS** — upstream queries encrypted via DoH. Your ISP sees HTTPS traffic, not DNS queries. Set `address = "https://9.9.9.9/dns-query"` in `[upstream]` or any DoH provider.
|
||||
- **Ad blocking that travels with you** — 385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network: coffee shops, hotels, airports.
|
||||
- **Sub-microsecond caching** — 691ns cached round-trip, ~2.0M queries/sec throughput, zero heap allocations in the I/O path. [Benchmarks](bench/).
|
||||
- **Live dashboard** — real-time stats, query log, blocking controls, service management. LAN accessibility badges show which services are reachable from other devices.
|
||||
- **macOS, Linux, and Windows** — `numa install` configures system DNS, `numa service start` runs as launchd/systemd service.
|
||||
|
||||
## Local Services
|
||||
## Local Service Proxy
|
||||
|
||||
Name your dev services instead of remembering port numbers:
|
||||
Name your local dev services with `.numa` domains:
|
||||
|
||||
```bash
|
||||
curl -X POST localhost:5380/services \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"name":"frontend","target_port":5173}'
|
||||
|
||||
open http://frontend.numa # → proxied to localhost:5173
|
||||
```
|
||||
|
||||
Now `https://frontend.numa` works in your browser — green lock, valid cert, WebSocket passthrough for HMR. No mkcert, no nginx, no `/etc/hosts`.
|
||||
- **HTTPS with green lock** — auto-generated local CA + per-service TLS certs
|
||||
- **WebSocket** — Vite/webpack HMR works through the proxy
|
||||
- **Health checks** — dashboard shows green/red status per service
|
||||
- **LAN sharing** — services bound to `0.0.0.0` are automatically discoverable by other Numa instances on the network. Dashboard shows "LAN" or "local only" per service.
|
||||
- **Path-based routing** — route URL paths to different backends:
|
||||
```toml
|
||||
[[services]]
|
||||
name = "app"
|
||||
target_port = 3000
|
||||
routes = [
|
||||
{ path = "/api", port = 5001 },
|
||||
{ path = "/auth", port = 5002, strip = true },
|
||||
]
|
||||
```
|
||||
`app.numa/api/users → :5001/api/users`, `app.numa/auth/login → :5002/login` (stripped)
|
||||
- **Persistent** — services survive restarts
|
||||
- Or configure in `numa.toml`:
|
||||
|
||||
Add path-based routing (`app.numa/api → :5001`), share services across machines via LAN discovery, or configure everything in [`numa.toml`](numa.toml).
|
||||
```toml
|
||||
[[services]]
|
||||
name = "frontend"
|
||||
target_port = 5173
|
||||
```
|
||||
|
||||
## Ad Blocking & Privacy
|
||||
## LAN Service Discovery
|
||||
|
||||
385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network — coffee shops, hotels, airports. Travels with your laptop.
|
||||
|
||||
Three resolution modes:
|
||||
|
||||
- **`forward`** (default) — transparent proxy to your existing system DNS. Everything works as before, just with caching and ad blocking on top. Captive portals, VPNs, corporate DNS — all respected.
|
||||
- **`recursive`** — resolve directly from root nameservers. No upstream dependency, no single entity sees your full query pattern. Add `[dnssec] enabled = true` for full chain-of-trust validation.
|
||||
- **`auto`** — probe root servers on startup, recursive if reachable, encrypted DoH fallback if blocked.
|
||||
|
||||
DNSSEC validates the full chain of trust: RRSIG signatures, DNSKEY verification, DS delegation, NSEC/NSEC3 denial proofs. [Read how it works →](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
||||
|
||||
**DNS-over-TLS listener** (RFC 7858) — accept encrypted queries on port 853 from strict clients like iOS Private DNS, systemd-resolved, or stubby. Two modes:
|
||||
|
||||
- **Self-signed** (default) — numa generates a local CA automatically. `numa install` adds it to the system trust store on macOS, Linux (Debian/Ubuntu, Fedora/RHEL/SUSE, Arch), and Windows. On iOS, install the `.mobileconfig` from `numa setup-phone`. Firefox keeps its own NSS store and ignores the system one — trust the CA there manually if you need HTTPS for `.numa` services in Firefox.
|
||||
- **Bring-your-own cert** — point `[dot] cert_path` / `key_path` at a publicly-trusted cert (e.g., Let's Encrypt via DNS-01 challenge on a domain pointing at your numa instance). Clients connect without any trust-store setup — same UX as AdGuard Home or Cloudflare `1.1.1.1`.
|
||||
|
||||
ALPN `"dot"` is advertised and enforced in both modes; a handshake with mismatched ALPN is rejected as a cross-protocol confusion defense.
|
||||
|
||||
## LAN Discovery
|
||||
|
||||
Run Numa on multiple machines. They find each other automatically via mDNS:
|
||||
Run Numa on multiple machines. They find each other automatically:
|
||||
|
||||
```
|
||||
Machine A (192.168.1.5) Machine B (192.168.1.20)
|
||||
┌──────────────────────┐ ┌──────────────────────┐
|
||||
│ Numa │ mDNS │ Numa │
|
||||
│ - api (port 8000) │◄───────────►│ - grafana (3000) │
|
||||
│ - frontend (5173) │ discovery │ │
|
||||
│ services: │◄───────────►│ services: │
|
||||
│ - api (port 8000) │ discovery │ - grafana (3000) │
|
||||
│ - frontend (5173) │ │ │
|
||||
└──────────────────────┘ └──────────────────────┘
|
||||
```
|
||||
|
||||
From Machine B: `curl http://api.numa` → proxied to Machine A's port 8000. Enable with `numa lan on`.
|
||||
From Machine B:
|
||||
```bash
|
||||
dig @127.0.0.1 api.numa # → 192.168.1.5
|
||||
curl http://api.numa # → proxied to Machine A's port 8000
|
||||
```
|
||||
|
||||
**Hub mode**: run one instance with `bind_addr = "0.0.0.0:53"` and point other devices' DNS to it — they get ad blocking + `.numa` resolution without installing anything.
|
||||
Enable LAN discovery:
|
||||
```bash
|
||||
numa lan on
|
||||
```
|
||||
Or in `numa.toml`:
|
||||
```toml
|
||||
[lan]
|
||||
enabled = true
|
||||
```
|
||||
Uses standard mDNS (`_numa._tcp.local` on port 5353) — compatible with Bonjour/Avahi, silently dropped by corporate firewalls instead of triggering IPS alerts.
|
||||
|
||||
**Hub mode** — don't want to install Numa on every machine? Run one instance as a shared DNS server and point other devices to it:
|
||||
|
||||
```bash
|
||||
# On the hub machine, bind to LAN interface
|
||||
[server]
|
||||
bind_addr = "0.0.0.0:53"
|
||||
|
||||
# On other devices, set DNS to the hub's IP
|
||||
# They get .numa resolution, ad blocking, caching — zero install
|
||||
```
|
||||
|
||||
## How It Compares
|
||||
|
||||
| | Pi-hole | AdGuard Home | Unbound | Numa |
|
||||
|---|---|---|---|---|
|
||||
| Local service proxy + auto TLS | — | — | — | `.numa` domains, HTTPS, WebSocket |
|
||||
| LAN service discovery | — | — | — | mDNS, zero config |
|
||||
| Developer overrides (REST API) | — | — | — | Auto-revert, scriptable |
|
||||
| Recursive resolver | — | — | Yes | Yes, with SRTT selection |
|
||||
| DNSSEC validation | — | — | Yes | Yes (RSA, ECDSA, Ed25519) |
|
||||
| Ad blocking | Yes | Yes | — | 385K+ domains |
|
||||
| Web admin UI | Full | Full | — | Dashboard |
|
||||
| Encrypted upstream (DoH) | Needs cloudflared | Yes | — | Native |
|
||||
| Encrypted clients (DoT listener) | Needs stunnel sidecar | Yes | Yes | Native (RFC 7858) |
|
||||
| Portable (laptop) | No (appliance) | No (appliance) | Server | Single binary, macOS/Linux/Windows |
|
||||
| Community maturity | 56K stars, 10 years | 33K stars | 20 years | New |
|
||||
| | Pi-hole | AdGuard Home | NextDNS | Cloudflare | Numa |
|
||||
|---|---|---|---|---|---|
|
||||
| Local service proxy | No | No | No | No | `.numa` + HTTPS + WS |
|
||||
| Path-based routing | No | No | No | No | Prefix match + strip |
|
||||
| LAN service discovery | No | No | No | No | mDNS, opt-in |
|
||||
| Developer overrides | No | No | No | No | REST API + auto-expiry |
|
||||
| Recursive resolver | No | No | Cloud only | Cloud only | From root hints, DNSSEC |
|
||||
| Encrypted upstream (DoH) | No (needs cloudflared) | Yes | Cloud only | Cloud only | Native, single binary |
|
||||
| Portable (travels with laptop) | No (appliance) | No (appliance) | Cloud only | Cloud only | Single binary |
|
||||
| Zero config | Complex | Docker/setup | Yes | Yes | Works out of the box |
|
||||
| Ad blocking | Yes | Yes | Yes | Limited | 385K+ domains |
|
||||
| Data stays local | Yes | Yes | Cloud | Cloud | 100% local |
|
||||
|
||||
## Performance
|
||||
## How It Works
|
||||
|
||||
691ns cached round-trip. ~2.0M qps throughput. Zero heap allocations in the hot path. Recursive queries average 237ms after SRTT warmup (12x improvement over round-robin). ECDSA P-256 DNSSEC verification: 174ns. [Benchmarks →](bench/)
|
||||
```
|
||||
Query → Overrides → .numa TLD → Blocklist → Local Zones → Cache → Recursive/Forward
|
||||
```
|
||||
|
||||
## Learn More
|
||||
Two resolution modes: **forward** (relay to upstream like Quad9/Cloudflare) or **recursive** (resolve from root nameservers — no upstream dependency). Set `mode = "recursive"` in `[upstream]` to resolve independently.
|
||||
|
||||
- [Blog: Implementing DNSSEC from Scratch in Rust](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
||||
- [Blog: I Built a DNS Resolver from Scratch](https://numa.rs/blog/posts/dns-from-scratch.html)
|
||||
- [Configuration reference](numa.toml) — all options documented inline
|
||||
- [REST API](src/api.rs) — 27 endpoints across overrides, cache, blocking, services, diagnostics
|
||||
No DNS libraries — no `hickory-dns`, no `trust-dns`. The wire protocol — headers, labels, compression pointers, record types — is parsed and serialized by hand. Runs on `tokio` + `axum`, async per-query task spawning.
|
||||
|
||||
[Configuration reference](numa.toml)
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [x] DNS forwarding, caching, ad blocking, developer overrides
|
||||
- [x] `.numa` local domains — auto TLS, path routing, WebSocket proxy
|
||||
- [x] LAN service discovery — mDNS, cross-machine DNS + proxy
|
||||
- [x] DNS-over-HTTPS — encrypted upstream
|
||||
- [x] DNS-over-TLS listener — encrypted client connections (RFC 7858, ALPN strict)
|
||||
- [x] Recursive resolution + DNSSEC — chain-of-trust, NSEC/NSEC3
|
||||
- [x] SRTT-based nameserver selection
|
||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT
|
||||
- [ ] Global `.numa` names — DHT-backed, no registrar
|
||||
- [x] DNS proxy core — forwarding, caching, local zones
|
||||
- [x] Developer overrides — REST API with auto-expiry
|
||||
- [x] Ad blocking — 385K+ domains, live dashboard, allowlist
|
||||
- [x] System integration — macOS + Linux, launchd/systemd, Tailscale/VPN auto-discovery
|
||||
- [x] Local service proxy — `.numa` domains, HTTP/HTTPS proxy, auto TLS, WebSocket
|
||||
- [x] Path-based routing — URL prefix routing with optional strip, REST API
|
||||
- [x] LAN service discovery — mDNS auto-discovery (opt-in), cross-machine DNS + proxy
|
||||
- [x] DNS-over-HTTPS — encrypted upstream via DoH (Quad9, Cloudflare, any provider)
|
||||
- [x] Recursive resolution — resolve from root nameservers, no upstream dependency
|
||||
- [x] DNSSEC validation — chain-of-trust, NSEC/NSEC3 denial proofs, AD bit (RSA, ECDSA, Ed25519)
|
||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT (15M nodes)
|
||||
- [ ] Global `.numa` names — self-publish, DHT-backed, first-come-first-served
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@@ -50,7 +50,17 @@ TLD priming solves this. On startup, Numa queries root for NS records of 34 comm
|
||||
|
||||
DNSSEC doesn't encrypt DNS traffic. It *signs* it. Every DNS record can have an accompanying RRSIG (signature) record. The resolver verifies the signature against the zone's DNSKEY, then verifies that DNSKEY against the parent zone's DS (delegation signer) record, walking up until it reaches the root trust anchor — a hardcoded public key that IANA publishes and the entire internet agrees on.
|
||||
|
||||
<img src="../dnssec-chain.svg" alt="DNSSEC chain of trust diagram — verifying cloudflare.com from answer through .com TLD to root trust anchor">
|
||||
```
|
||||
cloudflare.com A 104.16.132.229
|
||||
signed by → RRSIG (key_tag=34505, algo=13, signer=cloudflare.com)
|
||||
verified with → DNSKEY (cloudflare.com, key_tag=34505, ECDSA P-256)
|
||||
vouched for by → DS (at .com, key_tag=2371, digest=SHA-256 of cloudflare's DNSKEY)
|
||||
signed by → RRSIG (key_tag=19718, signer=com)
|
||||
verified with → DNSKEY (com, key_tag=19718)
|
||||
vouched for by → DS (at root, key_tag=30909)
|
||||
signed by → RRSIG (signer=.)
|
||||
verified with → DNSKEY (., key_tag=20326) ← root trust anchor (hardcoded)
|
||||
```
|
||||
|
||||
### How keys get there
|
||||
|
||||
@@ -155,9 +165,11 @@ The network fetch dominates. The crypto is noise.
|
||||
|
||||
## Surviving hostile networks
|
||||
|
||||
I deployed Numa as my system DNS and switched networks. Everything broke — every query SERVFAIL, 3-second timeout. The ISP blocks outbound UDP port 53 to everything except whitelisted public resolvers. Root servers, TLD servers, authoritative servers — all unreachable over UDP.
|
||||
I deployed Numa as my system DNS and switched to a different network. Everything broke. Every query: SERVFAIL, 3-second timeout.
|
||||
|
||||
But TCP port 53 worked. Every DNS server is required to support TCP (RFC 1035 section 4.2.2). The ISP only filters UDP.
|
||||
The network probe told the story: the ISP blocks outbound UDP port 53 to all servers except a handful of whitelisted public resolvers (Google, Cloudflare). Root servers, TLD servers, authoritative servers — all unreachable over UDP. The ISP forces you onto their DNS or a blessed upstream. Recursive resolution is impossible.
|
||||
|
||||
Except TCP port 53 worked fine. And every DNS server is required to support TCP (RFC 1035 section 4.2.2). The ISP apparently only filters UDP.
|
||||
|
||||
The fix has three parts:
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
<string>com.numa.dns</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>{{exe_path}}</string>
|
||||
<string>/usr/local/bin/numa</string>
|
||||
</array>
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
10
install.sh
10
install.sh
@@ -70,10 +70,8 @@ echo ""
|
||||
echo " \033[38;2;107;124;78mInstalled:\033[0m $INSTALL_DIR/numa ($TAG)"
|
||||
echo ""
|
||||
echo " Get started:"
|
||||
echo " sudo numa install # install service + set as system DNS"
|
||||
echo " open http://localhost:5380 # dashboard"
|
||||
echo ""
|
||||
echo " Other commands:"
|
||||
echo " sudo numa # run in foreground (no service)"
|
||||
echo " sudo numa uninstall # restore original DNS"
|
||||
echo " sudo numa # start the DNS server"
|
||||
echo " sudo numa install # set as system DNS"
|
||||
echo " sudo numa service start # run as persistent service"
|
||||
echo " open http://localhost:5380 # dashboard"
|
||||
echo ""
|
||||
|
||||
@@ -5,7 +5,7 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart={{exe_path}}
|
||||
ExecStart=/usr/local/bin/numa
|
||||
Restart=always
|
||||
RestartSec=2
|
||||
StandardOutput=journal
|
||||
|
||||
16
numa.toml
16
numa.toml
@@ -2,12 +2,6 @@
|
||||
bind_addr = "0.0.0.0:53"
|
||||
api_port = 5380
|
||||
# api_bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN dashboard access
|
||||
# data_dir = "/var/lib/numa" # where numa stores TLS CA and cert material
|
||||
# Defaults: /var/lib/numa on linux (FHS),
|
||||
# /usr/local/var/numa on macos (homebrew prefix),
|
||||
# %PROGRAMDATA%\numa on windows. Override for
|
||||
# containerized deploys or tests that can't
|
||||
# write to the system path.
|
||||
|
||||
# [upstream]
|
||||
# mode = "forward" # "forward" (default) — relay to upstream
|
||||
@@ -60,7 +54,7 @@ enabled = true
|
||||
port = 80
|
||||
tls_port = 443
|
||||
tld = "numa"
|
||||
# bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN access to .numa services
|
||||
# bind_addr = "127.0.0.1" # default; auto 0.0.0.0 when [lan] enabled
|
||||
|
||||
# Pre-configured services (numa.numa is always added automatically)
|
||||
# [[services]]
|
||||
@@ -89,14 +83,6 @@ tld = "numa"
|
||||
# enabled = false # opt-in: verify chain of trust from root KSK
|
||||
# strict = false # true = SERVFAIL on bogus signatures
|
||||
|
||||
# DNS-over-TLS listener (RFC 7858) — encrypted DNS on port 853
|
||||
# [dot]
|
||||
# enabled = false # opt-in: accept DoT queries
|
||||
# port = 853 # standard DoT port
|
||||
# bind_addr = "0.0.0.0" # IPv4 or IPv6; unspecified binds all interfaces
|
||||
# cert_path = "/etc/numa/dot.crt" # PEM cert; omit to use self-signed (proxy CA if available)
|
||||
# key_path = "/etc/numa/dot.key" # PEM private key; must be set together with cert_path
|
||||
|
||||
# LAN service discovery via mDNS (disabled by default — no network traffic unless enabled)
|
||||
# [lan]
|
||||
# enabled = true # discover other Numa instances via mDNS (_numa._tcp.local)
|
||||
|
||||
@@ -1,306 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
API="${NUMA_API:-http://127.0.0.1:5380}"
|
||||
DNS="${NUMA_DNS:-127.0.0.1}"
|
||||
NUMA_BIN="${NUMA_BIN:-/usr/local/bin/numa}"
|
||||
LAUNCHD_PLIST="/Library/LaunchDaemons/com.numa.dns.plist"
|
||||
|
||||
DOMAINS=(
|
||||
paypal.com ebay.com zoom.us slack.com discord.com
|
||||
microsoft.com apple.com meta.com oracle.com ibm.com
|
||||
docker.com kubernetes.io prometheus.io grafana.com terraform.io
|
||||
python.org nodejs.org golang.org wikipedia.org reddit.com
|
||||
stackoverflow.com stripe.com linear.app nytimes.com bbc.co.uk
|
||||
rust-lang.org fastly.com hetzner.com uber.com airbnb.com
|
||||
notion.so figma.com netflix.com spotify.com dropbox.com
|
||||
gitlab.com twitch.tv shopify.com vercel.app mozilla.org
|
||||
)
|
||||
|
||||
# Summarize latency for all RECURSIVE queries in the server's query log.
# Pulls $API/query-log and feeds it to an inline python3 script that prints
# count, avg/median/P95/P99/min/max, latency buckets, and the 5 slowest and
# 5 fastest queries. Prints a notice and exits cleanly if the log has none.
# Globals: API (read)
stats() {
  curl -s "$API/query-log" | python3 -c "
import sys, json

data = json.load(sys.stdin)
rec = [q for q in data if q['path'] == 'RECURSIVE']
if not rec:
    print('No recursive queries in log.')
    sys.exit()

vals = sorted([q['latency_ms'] for q in rec])
n = len(vals)

print(f'Recursive queries: {n}')
print(f' Avg: {sum(vals)/n:.1f}ms')
print(f' Median: {vals[n//2]:.1f}ms')
print(f' P95: {vals[int(n*0.95)]:.1f}ms')
print(f' P99: {vals[int(n*0.99)]:.1f}ms')
print(f' Min: {min(vals):.1f}ms')
print(f' Max: {max(vals):.1f}ms')
print(f' <100ms: {sum(1 for v in vals if v < 100)}')
print(f' <200ms: {sum(1 for v in vals if v < 200)}')
print(f' <500ms: {sum(1 for v in vals if v < 500)}')
print(f' >1s: {sum(1 for v in vals if v >= 1000)}')
print()
print('Slowest 5:')
for q in sorted(rec, key=lambda q: q['latency_ms'], reverse=True)[:5]:
    print(f' {q[\"latency_ms\"]:>8.1f}ms {q[\"query_type\"]:5s} {q[\"domain\"]:35s} {q[\"rescode\"]}')
print()
print('Fastest 5:')
for q in sorted(rec, key=lambda q: q['latency_ms'])[:5]:
    print(f' {q[\"latency_ms\"]:>8.1f}ms {q[\"query_type\"]:5s} {q[\"domain\"]:35s} {q[\"rescode\"]}')
"
}
|
||||
|
||||
# query_all <label> — resolve every domain in DOMAINS via dig against $DNS
# and print each query's "Query time" line from dig's statistics output.
# Globals: DOMAINS (read), DNS (read)
query_all() {
  local label="$1"
  local d
  echo "=== $label ==="
  for d in "${DOMAINS[@]}"; do
    printf " %-25s " "$d"
    # A failed lookup (timeout/SERVFAIL) produces no "Query time" line, so
    # grep exits non-zero; without the fallback that would abort the whole
    # benchmark under `set -e` and leave the printf'd line unterminated.
    dig "@$DNS" "$d" A +noall +stats 2>/dev/null | grep "Query time" || echo "(no response)"
  done
  echo
}
|
||||
|
||||
# Empty the server-side DNS cache, then report how many entries remain
# ('?' when the stats endpoint cannot be read).
flush_cache() {
  local remaining
  curl -s -X DELETE "$API/cache" > /dev/null
  remaining=$(curl -s "$API/stats" | python3 -c "import sys,json; print(json.load(sys.stdin)['cache']['entries'])" 2>/dev/null || echo '?')
  echo "Cache flushed ($remaining entries)."
}
|
||||
|
||||
# Block until the REST API answers /health, or die after 20 failed probes
# at 0.5s intervals (~10s total).
wait_for_api() {
  local tries
  for (( tries = 1; ; tries++ )); do
    if curl -sf "$API/health" > /dev/null 2>&1; then
      return 0
    fi
    if (( tries >= 20 )); then
      echo "ERROR: API not reachable at $API after 10s" >&2
      exit 1
    fi
    sleep 0.5
  done
}
|
||||
|
||||
# Block until the cache entry count is non-zero and unchanged for 3
# consecutive polls (taken as "TLD priming finished"), polling $API/stats
# once per second; give up after 60 polls and report a timeout.
# Globals: API (read)
wait_for_priming() {
  echo -n "Waiting for TLD priming..."
  local prev=0
  local stable=0
  for _ in $(seq 1 60); do
    local entries
    # Falls back to 0 if the API is unreachable or the JSON can't be parsed.
    entries=$(curl -s "$API/stats" | python3 -c "import sys,json; print(json.load(sys.stdin)['cache']['entries'])" 2>/dev/null || echo 0)
    # "Stable" means: same non-zero count as the previous poll.
    if [ "$entries" -gt 0 ] && [ "$entries" = "$prev" ]; then
      stable=$((stable + 1))
      if [ $stable -ge 3 ]; then
        echo " done ($entries cache entries)."
        return
      fi
    else
      stable=0
    fi
    prev="$entries"
    sleep 1
  done
  echo " timeout (cache: $prev entries)."
}
|
||||
|
||||
# restart_numa <config_toml_body>
# Writes config to a temp file, stops numa (launchd or manual), starts with that config.
# Requires sudo; blocks until the API is up and TLD priming has settled.
restart_numa() {
  local config_body="$1"
  local tmpconf
  # mktemp has no portable suffix option, so rename to get a .toml extension.
  tmpconf=$(mktemp /tmp/numa-bench-XXXXXX)
  mv "$tmpconf" "${tmpconf}.toml"
  tmpconf="${tmpconf}.toml"
  echo "$config_body" > "$tmpconf"

  # Stop launchd-managed numa if active
  if sudo launchctl list com.numa.dns &>/dev/null; then
    sudo launchctl unload "$LAUNCHD_PLIST" 2>/dev/null || true
    sleep 1
  fi

  # Kill any remaining
  sudo killall numa 2>/dev/null || true
  sleep 2

  # NOTE(review): the temp config is never deleted — the backgrounded daemon
  # keeps reading it, so cleanup would have to wait for restore_launchd.
  sudo "$NUMA_BIN" "$tmpconf" &
  wait_for_api
  wait_for_priming
  echo "numa ready (pid $(pgrep numa | head -1), config: $tmpconf)."
}
|
||||
|
||||
# Stop any manually started numa and hand control back to the launchd
# service, if its plist is installed.
restore_launchd() {
  sudo killall numa 2>/dev/null || true
  sleep 1
  [ -f "$LAUNCHD_PLIST" ] || return 0
  sudo launchctl load "$LAUNCHD_PLIST" 2>/dev/null || true
  echo "Restored launchd service."
}
|
||||
|
||||
# run_pass <label> — one benchmark pass: flush the cache, resolve every
# domain, then print the recursive-latency statistics under <label>.
run_pass() {
  local pass_label="$1"
  flush_cache
  sleep 0.5
  query_all "$pass_label"
  echo "=== $pass_label — stats ==="
  stats
}
|
||||
|
||||
# run_variant <label> <config_toml> — restart numa under <config_toml>, then
# run one benchmark pass tagged <label>. Factors out the restart/echo/run_pass
# sequence that every compare-* branch below repeated verbatim.
run_variant() {
  restart_numa "$2"
  echo
  run_pass "$1"
}

# divider — visual separator printed between A/B variants.
divider() {
  echo
  echo "--------------------------------------------"
  echo
}

# Sub-command dispatch; defaults to the "full" benchmark.
case "${1:-full}" in
  cold)
    echo "--- Cold cache benchmark ---"
    run_pass "Cold SRTT + Cold cache"
    ;;
  warm)
    echo "--- Warm SRTT benchmark ---"
    echo "Priming SRTT..."
    # Throwaway lookups warm the SRTT table; `|| true` keeps one failed dig
    # (non-zero exit on timeout) from aborting the run under `set -e`.
    for d in "${DOMAINS[@]}"; do dig "@$DNS" "$d" A +short > /dev/null 2>&1 || true; done
    run_pass "Warm SRTT + Cold cache"
    ;;
  stats)
    stats
    ;;
  compare-srtt)
    echo "============================================"
    echo " A/B: SRTT OFF vs ON (dnssec off)"
    echo "============================================"
    echo

    run_variant "SRTT OFF" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = false
TOML
)"
    divider
    run_variant "SRTT ON" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true
TOML
)"
    echo
    restore_launchd
    ;;
  compare-dnssec)
    echo "============================================"
    echo " A/B: DNSSEC OFF vs ON (srtt on)"
    echo "============================================"
    echo

    run_variant "DNSSEC OFF" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = false
TOML
)"
    divider
    run_variant "DNSSEC ON" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = true
TOML
)"
    echo
    restore_launchd
    ;;
  compare-all)
    echo "============================================"
    echo " Full A/B matrix"
    echo " 1. SRTT OFF + DNSSEC OFF (baseline)"
    echo " 2. SRTT ON + DNSSEC OFF"
    echo " 3. SRTT ON + DNSSEC ON"
    echo "============================================"
    echo

    # --- 1. Baseline ---
    run_variant "SRTT OFF + DNSSEC OFF" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = false

[dnssec]
enabled = false
TOML
)"
    divider
    # --- 2. SRTT only ---
    run_variant "SRTT ON + DNSSEC OFF" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = false
TOML
)"
    divider
    # --- 3. Both ---
    run_variant "SRTT ON + DNSSEC ON" "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = true
TOML
)"
    echo
    restore_launchd
    ;;
  full|*)
    echo "--- Full benchmark (cold → warm → SRTT-only) ---"
    echo

    wait_for_priming
    flush_cache
    sleep 0.5
    query_all "Pass 1: Cold SRTT + Cold cache"

    flush_cache
    sleep 0.5
    query_all "Pass 2: Warm SRTT + Cold cache"

    echo "=== Pass 2 stats (SRTT-warm) ==="
    stats
    ;;
esac
|
||||
@@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env bash
# Release helper: bump the version in Cargo.toml, commit, tag vX.Y.Z, and
# push — GitHub Actions then builds, publishes to crates.io, and creates
# the release from the tag.
set -euo pipefail

if [ $# -ne 1 ]; then
  echo "Usage: $0 <version> (e.g. 0.7.0)" >&2
  exit 1
fi

VERSION="$1"
TAG="v$VERSION"

# Reject malformed versions early, before anything touches git or Cargo.toml.
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  echo "ERROR: version must look like X.Y.Z, got '$VERSION'" >&2
  exit 1
fi

# Sanity checks
if ! git diff --quiet || ! git diff --cached --quiet; then
  echo "ERROR: working tree is dirty — commit or stash first" >&2
  exit 1
fi

if [ "$(git branch --show-current)" != "main" ]; then
  echo "ERROR: must be on main branch" >&2
  exit 1
fi

if git tag -l "$TAG" | grep -q .; then
  echo "ERROR: tag $TAG already exists" >&2
  exit 1
fi

# First `version = ` line is the package version (workspace members follow).
CURRENT=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
if [ -z "$CURRENT" ]; then
  echo "ERROR: could not read current version from Cargo.toml" >&2
  exit 1
fi
echo "Bumping $CURRENT -> $VERSION"

# Bump version. Escape the dots in $CURRENT so sed matches them literally
# (otherwise "0.1.0" would also match e.g. "0x1x0").
CURRENT_RE=${CURRENT//./\\.}
# -i.bak then rm: portable across GNU and BSD sed.
sed -i.bak "s/^version = \"$CURRENT_RE\"/version = \"$VERSION\"/" Cargo.toml
rm -f Cargo.toml.bak
cargo update --workspace

# Commit, tag, push
git add Cargo.toml Cargo.lock
git commit -m "chore: bump version to $VERSION"
git tag "$TAG"
git push origin main --tags

echo
echo "Released $TAG — GitHub Actions will build, publish to crates.io, and create the release."
|
||||
@@ -1,57 +0,0 @@
|
||||
#!/usr/bin/env python3
"""Rewrite a Homebrew formula in place: bump version, URL paths, and sha256 lines.

Reads the formula path from argv[1], and the following env vars:
    VERSION            e.g. "0.10.0" (leading "v" is tolerated and stripped)
    SHA_MACOS_AARCH64
    SHA_MACOS_X86_64
    SHA_LINUX_AARCH64
    SHA_LINUX_X86_64

Assumptions about the formula:
  - Has `version "X.Y.Z"` somewhere
  - Has `url "...releases/download/vX.Y.Z/numa-<target>.tar.gz"` lines
  - May or may not already have `sha256 "..."` lines immediately after each url
"""
import os
import re
import sys


def rewrite_formula(content: str, version: str, shas: dict) -> str:
    """Return the formula text with version, download URLs, and sha256 updated.

    `shas` maps release-target names (e.g. "macos-aarch64") to hex digests;
    targets missing from the map keep their url line without a sha256.
    Pure function — no I/O — so it is unit-testable.
    """
    content = re.sub(r'version "[^"]*"', f'version "{version}"', content)
    content = re.sub(
        r"releases/download/v[\d.]+/numa-",
        f"releases/download/v{version}/numa-",
        content,
    )
    # Drop every stale sha256 line; fresh ones are re-inserted after each url.
    content = re.sub(r'\n[ \t]*sha256 "[^"]*"', "", content)

    def add_sha(match: re.Match) -> str:
        # Append a sha256 line under the url, reusing the url's indentation.
        indent = match.group(1)
        target = match.group(2)
        if target not in shas:
            return match.group(0)
        return f'{match.group(0)}\n{indent}sha256 "{shas[target]}"'

    return re.sub(
        r'^([ \t]+)url "[^"]*numa-([\w-]+)\.tar\.gz"',
        add_sha,
        content,
        flags=re.MULTILINE,
    )


def main() -> None:
    """Read argv/env, rewrite the formula file in place."""
    formula_path = sys.argv[1]
    version = os.environ["VERSION"].lstrip("v")
    # Missing env vars raise KeyError here — fail fast in CI.
    shas = {
        "macos-aarch64": os.environ["SHA_MACOS_AARCH64"],
        "macos-x86_64": os.environ["SHA_MACOS_X86_64"],
        "linux-aarch64": os.environ["SHA_LINUX_AARCH64"],
        "linux-x86_64": os.environ["SHA_LINUX_X86_64"],
    }

    with open(formula_path) as f:
        content = f.read()

    with open(formula_path, "w") as f:
        f.write(rewrite_formula(content, version, shas))


if __name__ == "__main__":
    main()
|
||||
@@ -1,136 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 720 680" font-family="'DM Sans', system-ui, sans-serif" font-size="13">
|
||||
<defs>
|
||||
<marker id="arr" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#64748b"/>
|
||||
</marker>
|
||||
<marker id="arr-amber" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#c0623a"/>
|
||||
</marker>
|
||||
<marker id="arr-teal" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#6b7c4e"/>
|
||||
</marker>
|
||||
<filter id="s" x="-3%" y="-3%" width="106%" height="106%">
|
||||
<feDropShadow dx="0" dy="1" stdDeviation="2" flood-opacity="0.06"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Background -->
|
||||
<rect width="720" height="680" rx="8" fill="#faf7f2"/>
|
||||
|
||||
<!-- Title -->
|
||||
<text x="360" y="36" text-anchor="middle" font-size="15" font-weight="600" fill="#2c2418" font-family="'Instrument Serif', Georgia, serif" letter-spacing="-0.02em">DNSSEC Chain of Trust</text>
|
||||
<text x="360" y="54" text-anchor="middle" font-size="11" fill="#a39888">Verifying cloudflare.com — from answer to root trust anchor</text>
|
||||
|
||||
<!-- Legend -->
|
||||
<g transform="translate(28, 72)">
|
||||
<rect width="14" height="14" rx="3" fill="#c0623a" opacity="0.15" stroke="#c0623a" stroke-width="1"/>
|
||||
<text x="20" y="12" font-size="11" fill="#6b5e4f">Verify signature (RRSIG → DNSKEY)</text>
|
||||
<rect x="230" width="14" height="14" rx="3" fill="#6b7c4e" opacity="0.15" stroke="#6b7c4e" stroke-width="1"/>
|
||||
<text x="250" y="12" font-size="11" fill="#6b5e4f">Vouch for key (DS → parent DNSKEY)</text>
|
||||
<rect x="478" width="14" height="14" rx="3" fill="#2c2418" opacity="0.08" stroke="#2c2418" stroke-opacity="0.15" stroke-width="1"/>
|
||||
<text x="498" y="12" font-size="11" fill="#6b5e4f">DNS record / key</text>
|
||||
</g>
|
||||
|
||||
<!-- ═══ ZONE: cloudflare.com ═══ -->
|
||||
<rect x="40" y="104" width="640" height="152" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="122" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">CLOUDFLARE.COM ZONE</text>
|
||||
|
||||
<!-- A record -->
|
||||
<rect x="80" y="138" width="320" height="38" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="157" font-size="12" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">cloudflare.com A 104.16.132.229</text>
|
||||
<text x="96" y="170" font-size="10" fill="#a39888">The answer we want to verify</text>
|
||||
|
||||
<!-- RRSIG -->
|
||||
<line x1="400" y1="157" x2="440" y2="157" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="149" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="138" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="155" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="155" font-size="11" fill="#6b5e4f">tag=34505, algo=13</text>
|
||||
<text x="461" y="170" font-size="10" fill="#a39888">signer: cloudflare.com</text>
|
||||
|
||||
<!-- DNSKEY -->
|
||||
<rect x="80" y="192" width="320" height="50" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="211" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="211" font-size="11" fill="#6b5e4f">cloudflare.com, tag=34505</text>
|
||||
<text x="96" y="228" font-size="11" fill="#6b7c4e" font-weight="500">ECDSA P-256</text>
|
||||
<text x="194" y="228" font-size="10" fill="#a39888">— 174ns to verify</text>
|
||||
|
||||
<!-- RRSIG → DNSKEY arrow -->
|
||||
<path d="M 555 176 L 555 192 L 400 192 L 400 200" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="189" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ ZONE: .com ═══ -->
|
||||
<rect x="40" y="270" width="640" height="132" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="288" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">.COM TLD ZONE</text>
|
||||
|
||||
<!-- DS connecting zones -->
|
||||
<line x1="240" y1="242" x2="240" y2="302" stroke="#6b7c4e" stroke-width="1.5" marker-end="url(#arr-teal)"/>
|
||||
<text x="252" y="276" font-size="9" fill="#6b7c4e" font-weight="600">vouched for by</text>
|
||||
|
||||
<!-- DS record at .com -->
|
||||
<rect x="80" y="304" width="320" height="38" rx="6" fill="rgba(107,124,78,0.06)" stroke="rgba(107,124,78,0.2)" filter="url(#s)"/>
|
||||
<text x="96" y="321" font-size="11" font-weight="600" fill="#566540" font-family="'JetBrains Mono', monospace">DS</text>
|
||||
<text x="118" y="321" font-size="11" fill="#6b5e4f">tag=2371, digest=SHA-256</text>
|
||||
<text x="96" y="336" font-size="10" fill="#a39888">hash of cloudflare.com DNSKEY</text>
|
||||
|
||||
<!-- DS signed by RRSIG -->
|
||||
<line x1="400" y1="323" x2="440" y2="323" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="315" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="304" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="321" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="321" font-size="11" fill="#6b5e4f">tag=19718, signer=com</text>
|
||||
|
||||
<!-- .com DNSKEY -->
|
||||
<rect x="80" y="356" width="320" height="32" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="377" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="377" font-size="11" fill="#6b5e4f">com, tag=19718</text>
|
||||
|
||||
<!-- RRSIG → .com DNSKEY -->
|
||||
<path d="M 555 342 L 555 356 L 400 356 L 400 366" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="353" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ ZONE: root ═══ -->
|
||||
<rect x="40" y="404" width="640" height="132" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="422" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">ROOT ZONE (.)</text>
|
||||
|
||||
<!-- DS connecting .com → root -->
|
||||
<line x1="240" y1="388" x2="240" y2="436" stroke="#6b7c4e" stroke-width="1.5" marker-end="url(#arr-teal)"/>
|
||||
<text x="252" y="416" font-size="9" fill="#6b7c4e" font-weight="600">vouched for by</text>
|
||||
|
||||
<!-- DS at root -->
|
||||
<rect x="80" y="438" width="320" height="38" rx="6" fill="rgba(107,124,78,0.06)" stroke="rgba(107,124,78,0.2)" filter="url(#s)"/>
|
||||
<text x="96" y="455" font-size="11" font-weight="600" fill="#566540" font-family="'JetBrains Mono', monospace">DS</text>
|
||||
<text x="118" y="455" font-size="11" fill="#6b5e4f">tag=30909, digest=SHA-256</text>
|
||||
<text x="96" y="470" font-size="10" fill="#a39888">hash of com DNSKEY</text>
|
||||
|
||||
<!-- DS signed by root RRSIG -->
|
||||
<line x1="400" y1="457" x2="440" y2="457" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="449" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="438" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="455" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="455" font-size="11" fill="#6b5e4f">signer=.</text>
|
||||
|
||||
<!-- Root DNSKEY -->
|
||||
<rect x="80" y="490" width="320" height="32" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="511" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="511" font-size="11" fill="#6b5e4f">root (.), tag=20326, RSA/SHA-256</text>
|
||||
|
||||
<!-- RRSIG → root DNSKEY -->
|
||||
<path d="M 555 476 L 555 490 L 400 490 L 400 500" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="487" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ TRUST ANCHOR ═══ -->
|
||||
<line x1="240" y1="522" x2="240" y2="558" stroke="#2c2418" stroke-width="2" stroke-dasharray="4,3"/>
|
||||
|
||||
<rect x="120" y="560" width="480" height="52" rx="8" fill="#2c2418" filter="url(#s)"/>
|
||||
<text x="360" y="582" text-anchor="middle" font-size="12" font-weight="600" fill="#faf7f2" font-family="'JetBrains Mono', monospace">ROOT TRUST ANCHOR</text>
|
||||
<text x="360" y="600" text-anchor="middle" font-size="11" fill="#a39888">IANA KSK, key_tag=20326 — hardcoded in Numa as const [u8; 256]</text>
|
||||
|
||||
<!-- Flow summary -->
|
||||
<text x="360" y="646" text-anchor="middle" font-size="12" fill="#6b5e4f" font-style="italic">Trust flows up (DS records). Keys flow down (DNSKEY → RRSIG).</text>
|
||||
<text x="360" y="664" text-anchor="middle" font-size="11" fill="#a39888">If any link breaks — wrong signature, missing DS, expired RRSIG — Numa rejects the response.</text>
|
||||
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 9.2 KiB |
@@ -101,7 +101,7 @@ body {
|
||||
/* Stat cards row */
|
||||
.stats-row {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(6, 1fr);
|
||||
grid-template-columns: repeat(5, 1fr);
|
||||
gap: 1rem;
|
||||
}
|
||||
.stat-card {
|
||||
@@ -125,8 +125,6 @@ body {
|
||||
.stat-card.blocked::before { background: var(--rose); }
|
||||
.stat-card.overrides::before { background: var(--violet); }
|
||||
.stat-card.uptime::before { background: var(--cyan); }
|
||||
.stat-card.memory::before { background: var(--text-dim); }
|
||||
.stat-card.memory .stat-value { color: var(--text-secondary); }
|
||||
|
||||
.stat-label {
|
||||
font-size: 0.7rem;
|
||||
@@ -287,7 +285,6 @@ body {
|
||||
.path-tag.OVERRIDE { background: rgba(82, 122, 82, 0.12); color: var(--emerald); }
|
||||
.path-tag.SERVFAIL { background: rgba(181, 68, 58, 0.12); color: var(--rose); }
|
||||
.path-tag.BLOCKED { background: rgba(163, 152, 136, 0.15); color: var(--text-dim); }
|
||||
.path-tag.COALESCED { background: rgba(138, 104, 158, 0.12); color: var(--violet-dim); }
|
||||
|
||||
/* Sidebar panels */
|
||||
.sidebar {
|
||||
@@ -470,74 +467,10 @@ body {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Memory sidebar panel */
|
||||
.memory-bar {
|
||||
display: flex;
|
||||
height: 18px;
|
||||
border-radius: 4px;
|
||||
overflow: hidden;
|
||||
background: var(--bg-surface);
|
||||
margin-bottom: 0.8rem;
|
||||
}
|
||||
.memory-bar-seg {
|
||||
height: 100%;
|
||||
min-width: 2px;
|
||||
transition: width 0.6s ease;
|
||||
}
|
||||
.memory-bar-seg.cache { background: var(--teal); }
|
||||
.memory-bar-seg.blocklist { background: var(--rose); }
|
||||
.memory-bar-seg.querylog { background: var(--amber); }
|
||||
.memory-bar-seg.srtt { background: var(--cyan); }
|
||||
.memory-bar-seg.overrides { background: var(--violet); }
|
||||
.memory-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding: 0.3rem 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
}
|
||||
.memory-row:last-child { border-bottom: none; }
|
||||
.memory-row-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 2px;
|
||||
flex-shrink: 0;
|
||||
margin-right: 0.5rem;
|
||||
}
|
||||
.memory-row-label {
|
||||
flex: 1;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
.memory-row-size {
|
||||
width: 65px;
|
||||
text-align: right;
|
||||
color: var(--text-primary);
|
||||
font-weight: 500;
|
||||
}
|
||||
.memory-row-entries {
|
||||
width: 90px;
|
||||
text-align: right;
|
||||
color: var(--text-dim);
|
||||
}
|
||||
.memory-rss {
|
||||
margin-top: 0.5rem;
|
||||
padding-top: 0.5rem;
|
||||
border-top: 1px solid var(--border);
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
color: var(--text-dim);
|
||||
}
|
||||
|
||||
/* Responsive */
|
||||
@media (max-width: 1100px) {
|
||||
.main-grid { grid-template-columns: 1fr; }
|
||||
}
|
||||
@media (max-width: 900px) {
|
||||
.stats-row { grid-template-columns: repeat(3, 1fr); }
|
||||
}
|
||||
@media (max-width: 700px) {
|
||||
.stats-row { grid-template-columns: repeat(2, 1fr); }
|
||||
.dashboard { padding: 1rem; }
|
||||
@@ -590,11 +523,6 @@ body {
|
||||
<div class="stat-value" id="uptime">—</div>
|
||||
<div class="stat-sub" id="uptimeSub"> </div>
|
||||
</div>
|
||||
<div class="stat-card memory">
|
||||
<div class="stat-label">Memory</div>
|
||||
<div class="stat-value" id="memoryRss">—</div>
|
||||
<div class="stat-sub" id="memorySub"> </div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Resolution paths -->
|
||||
@@ -619,8 +547,6 @@ body {
|
||||
<select id="logFilterPath" onchange="applyLogFilter()"
|
||||
style="font-family:var(--font-mono);font-size:0.7rem;padding:0.25rem 0.4rem;border:1px solid var(--border);border-radius:4px;background:var(--bg-surface);color:var(--text-secondary);outline:none;">
|
||||
<option value="">all paths</option>
|
||||
<option value="RECURSIVE">recursive</option>
|
||||
<option value="COALESCED">coalesced</option>
|
||||
<option value="FORWARD">forward</option>
|
||||
<option value="CACHED">cached</option>
|
||||
<option value="BLOCKED">blocked</option>
|
||||
@@ -719,17 +645,6 @@ body {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Memory breakdown -->
|
||||
<div class="panel" id="memoryPanel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">Memory</span>
|
||||
<span class="panel-title" id="memoryTotal" style="color: var(--text-dim)"></span>
|
||||
</div>
|
||||
<div class="panel-body" id="memoryBody">
|
||||
<div class="empty-state">No memory data</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Cache entries -->
|
||||
<div class="panel">
|
||||
<div class="panel-header">
|
||||
@@ -794,69 +709,6 @@ function formatRemaining(secs) {
|
||||
return `${Math.floor(secs / 3600)}h ${Math.floor((secs % 3600) / 60)}m left`;
|
||||
}
|
||||
|
||||
// Human-readable byte count: integer bytes below 1 KiB, one decimal above,
// using binary (1024-based) units up to GB.
function formatBytes(bytes) {
  if (bytes < 1024) return bytes + ' B';
  const scales = [
    [1073741824, ' GB'],
    [1048576, ' MB'],
    [1024, ' KB'],
  ];
  for (const [divisor, suffix] of scales) {
    if (bytes >= divisor) return (bytes / divisor).toFixed(1) + suffix;
  }
}
|
||||
|
||||
// Memory components shown in the sidebar breakdown, in render order.
// `key` names both the `<key>_bytes` field read from the stats memory object
// and the entryCounts key in renderMemory; `cls` is the CSS class that colors
// the stacked-bar segment; `color` is the CSS variable used for the row dot.
const MEMORY_COMPONENTS = [
  { key: 'cache', label: 'Cache', cls: 'cache', color: 'var(--teal)' },
  { key: 'blocklist', label: 'Blocklist', cls: 'blocklist', color: 'var(--rose)' },
  { key: 'query_log', label: 'Query Log', cls: 'querylog', color: 'var(--amber)' },
  { key: 'srtt', label: 'SRTT', cls: 'srtt', color: 'var(--cyan)' },
  { key: 'overrides', label: 'Overrides', cls: 'overrides', color: 'var(--violet)' },
];
|
||||
|
||||
// Render the Memory stat card and the sidebar breakdown panel.
// `mem` is the stats payload's memory object (reads <key>_bytes per
// MEMORY_COMPONENTS, process_memory_bytes, total_estimated_bytes,
// query_log_entries, srtt_entries); `stats` supplies the remaining
// per-component entry counts. No-op when memory data is absent.
function renderMemory(mem, stats) {
  if (!mem) return;

  // Stat card
  document.getElementById('memoryRss').textContent = formatBytes(mem.process_memory_bytes);
  document.getElementById('memorySub').textContent = 'est. ' + formatBytes(mem.total_estimated_bytes);

  // Entry counts keyed to MEMORY_COMPONENTS[].key.
  const entryCounts = {
    cache: stats.cache.entries,
    blocklist: stats.blocking.domains_loaded,
    query_log: mem.query_log_entries,
    srtt: mem.srtt_entries,
    overrides: stats.overrides.active,
  };

  // Sidebar panel — `|| 1` guards the percentage division below against 0.
  const total = mem.total_estimated_bytes || 1;
  document.getElementById('memoryTotal').textContent = formatBytes(total);

  // Stacked bar: one segment per component, width = share of estimated total.
  const barSegments = MEMORY_COMPONENTS.map(c => {
    const bytes = mem[c.key + '_bytes'] || 0;
    const pct = ((bytes / total) * 100).toFixed(1);
    return `<div class="memory-bar-seg ${c.cls}" style="width:${pct}%" title="${c.label}: ${formatBytes(bytes)} (${pct}%)"></div>`;
  }).join('');

  // One row per component: color dot, label, size, entry count.
  const rows = MEMORY_COMPONENTS.map(c => {
    const bytes = mem[c.key + '_bytes'] || 0;
    const entries = entryCounts[c.key] || 0;
    return `
      <div class="memory-row">
        <div class="memory-row-dot" style="background:${c.color}"></div>
        <span class="memory-row-label">${c.label}</span>
        <span class="memory-row-size">${formatBytes(bytes)}</span>
        <span class="memory-row-entries">${formatNumber(entries)} entries</span>
      </div>`;
  }).join('');

  document.getElementById('memoryBody').innerHTML = `
    <div class="memory-bar">${barSegments}</div>
    ${rows}
    <div class="memory-rss">
      <span>Process Footprint</span>
      <span>${formatBytes(mem.process_memory_bytes)}</span>
    </div>
  `;
}
|
||||
|
||||
const PATH_DEFS = [
|
||||
{ key: 'forwarded', label: 'Forward', cls: 'forward' },
|
||||
{ key: 'recursive', label: 'Recursive', cls: 'recursive' },
|
||||
@@ -1027,13 +879,6 @@ async function refresh() {
|
||||
document.getElementById('footerUpstream').textContent = stats.upstream || '';
|
||||
document.getElementById('footerConfig').textContent = stats.config_path || '';
|
||||
document.getElementById('footerData').textContent = stats.data_dir || '';
|
||||
const modeEl = document.getElementById('footerMode');
|
||||
modeEl.textContent = stats.mode || '—';
|
||||
modeEl.style.color = stats.mode === 'recursive' ? 'var(--emerald)' : 'var(--amber)';
|
||||
document.getElementById('footerDnssec').textContent = stats.dnssec ? 'on' : 'off';
|
||||
document.getElementById('footerDnssec').style.color = stats.dnssec ? 'var(--emerald)' : 'var(--text-dim)';
|
||||
document.getElementById('footerSrtt').textContent = stats.srtt ? 'on' : 'off';
|
||||
document.getElementById('footerSrtt').style.color = stats.srtt ? 'var(--emerald)' : 'var(--text-dim)';
|
||||
|
||||
// LAN status indicator
|
||||
const lanEl = document.getElementById('lanToggle');
|
||||
@@ -1093,7 +938,7 @@ async function refresh() {
|
||||
prevTime = now;
|
||||
|
||||
// Cache hit rate
|
||||
const answered = q.cached + q.forwarded + q.recursive + q.coalesced + q.local + q.overridden;
|
||||
const answered = q.cached + q.forwarded + q.local + q.overridden;
|
||||
const hitRate = answered > 0 ? ((q.cached / answered) * 100).toFixed(1) : '0.0';
|
||||
document.getElementById('cacheRate').textContent = hitRate + '%';
|
||||
|
||||
@@ -1105,7 +950,6 @@ async function refresh() {
|
||||
renderServices(services);
|
||||
renderBlockingInfo(blockingInfo);
|
||||
renderAllowlist(allowlist);
|
||||
renderMemory(stats.memory, stats);
|
||||
|
||||
} catch (err) {
|
||||
document.getElementById('statusDot').className = 'status-dot error';
|
||||
@@ -1385,9 +1229,6 @@ setInterval(refresh, 2000);
|
||||
Config: <span id="footerConfig" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Data: <span id="footerData" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Upstream: <span id="footerUpstream" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Mode: <span id="footerMode" style="color:var(--text-dim);">—</span>
|
||||
· DNSSEC: <span id="footerDnssec" style="color:var(--text-dim);">—</span>
|
||||
· SRTT: <span id="footerSrtt" style="color:var(--text-dim);">—</span>
|
||||
· Logs: <span style="user-select:all;color:var(--emerald);">macOS: /usr/local/var/log/numa.log · Linux: journalctl -u numa -f</span>
|
||||
· <a href="https://github.com/razvandimescu/numa" target="_blank" rel="noopener" style="color:var(--amber);text-decoration:none;">GitHub</a>
|
||||
</div>
|
||||
|
||||
@@ -4,10 +4,10 @@
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Numa — DNS you own. Everywhere you go.</title>
|
||||
<meta name="description" content="DNS you own. Portable DNS resolver with caching, ad blocking, .numa local domains, developer overrides. Optional recursive resolution with full DNSSEC validation. Built from scratch in Rust.">
|
||||
<meta name="description" content="DNS you own. Recursive resolver with full DNSSEC validation, ad blocking, .numa local domains, developer overrides. A single portable binary built from scratch in Rust.">
|
||||
<link rel="canonical" href="https://numa.rs">
|
||||
<meta property="og:title" content="Numa — DNS you own. Everywhere you go.">
|
||||
<meta property="og:description" content="Portable DNS resolver with caching, ad blocking, .numa local domains, and developer overrides. Optional recursive resolution with full DNSSEC validation. Built from scratch in Rust.">
|
||||
<meta property="og:description" content="Recursive DNS resolver with full DNSSEC validation, ad blocking, .numa local domains, and developer overrides. Built from scratch in Rust.">
|
||||
<meta property="og:type" content="website">
|
||||
<meta property="og:url" content="https://numa.rs">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
@@ -1232,17 +1232,17 @@ footer .closing {
|
||||
<div class="reveal">
|
||||
<div class="section-label">How It Works</div>
|
||||
<h2>What it does today</h2>
|
||||
<p class="lead">A DNS resolver with caching, ad blocking, local service domains, and a REST API. Optional recursive resolution with DNSSEC. Everything runs in a single binary.</p>
|
||||
<p class="lead">A recursive DNS resolver with DNSSEC validation, ad blocking, local service domains, and a REST API. Everything runs in a single binary.</p>
|
||||
</div>
|
||||
<div class="layers-grid">
|
||||
<div class="layer-card reveal reveal-delay-1">
|
||||
<div class="layer-badge">Layer 1</div>
|
||||
<h3>Resolve & Protect</h3>
|
||||
<ul>
|
||||
<li>Forward mode by default — transparent proxy to your existing DNS, with caching</li>
|
||||
<li>Ad & tracker blocking — 385K+ domains, zero config</li>
|
||||
<li>Recursive resolution — opt-in, resolve from root nameservers, no upstream needed</li>
|
||||
<li>Recursive resolution — resolve from root nameservers, no upstream needed</li>
|
||||
<li>DNSSEC validation — chain-of-trust + NSEC/NSEC3 denial proofs (RSA, ECDSA, Ed25519)</li>
|
||||
<li>Ad & tracker blocking — 385K+ domains, zero config</li>
|
||||
<li>DNS-over-HTTPS — encrypted upstream as alternative to recursive mode</li>
|
||||
<li>TTL-aware caching (sub-ms lookups)</li>
|
||||
<li>Single binary, portable — macOS, Linux, and Windows</li>
|
||||
</ul>
|
||||
|
||||
70
src/api.rs
70
src/api.rs
@@ -160,17 +160,13 @@ struct QueryLogResponse {
|
||||
struct StatsResponse {
|
||||
uptime_secs: u64,
|
||||
upstream: String,
|
||||
mode: &'static str, // "recursive" or "forward" — never "auto" at runtime
|
||||
config_path: String,
|
||||
data_dir: String,
|
||||
dnssec: bool,
|
||||
srtt: bool,
|
||||
queries: QueriesStats,
|
||||
cache: CacheStats,
|
||||
overrides: OverrideStats,
|
||||
blocking: BlockingStatsResponse,
|
||||
lan: LanStatsResponse,
|
||||
memory: MemoryStats,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
@@ -184,7 +180,6 @@ struct QueriesStats {
|
||||
total: u64,
|
||||
forwarded: u64,
|
||||
recursive: u64,
|
||||
coalesced: u64,
|
||||
cached: u64,
|
||||
local: u64,
|
||||
overridden: u64,
|
||||
@@ -211,19 +206,6 @@ struct BlockingStatsResponse {
|
||||
allowlist_size: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct MemoryStats {
|
||||
cache_bytes: usize,
|
||||
blocklist_bytes: usize,
|
||||
query_log_bytes: usize,
|
||||
query_log_entries: usize,
|
||||
srtt_bytes: usize,
|
||||
srtt_entries: usize,
|
||||
overrides_bytes: usize,
|
||||
total_estimated_bytes: usize,
|
||||
process_memory_bytes: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct DiagnoseResponse {
|
||||
domain: String,
|
||||
@@ -425,8 +407,14 @@ async fn forward_query_for_diagnose(
|
||||
timeout: std::time::Duration,
|
||||
) -> (bool, String) {
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::DnsQuestion;
|
||||
|
||||
let query = DnsPacket::query(0xBEEF, domain, QueryType::A);
|
||||
let mut query = DnsPacket::new();
|
||||
query.header.id = 0xBEEF;
|
||||
query.header.recursion_desired = true;
|
||||
query
|
||||
.questions
|
||||
.push(DnsQuestion::new(domain.to_string(), QueryType::A));
|
||||
|
||||
match forward_query(&query, upstream, timeout).await {
|
||||
Ok(resp) => (
|
||||
@@ -485,29 +473,12 @@ async fn query_log(
|
||||
|
||||
async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
let snap = ctx.stats.lock().unwrap().snapshot();
|
||||
let (cache_len, cache_max, cache_bytes) = {
|
||||
let (cache_len, cache_max) = {
|
||||
let cache = ctx.cache.read().unwrap();
|
||||
(cache.len(), cache.max_entries(), cache.heap_bytes())
|
||||
(cache.len(), cache.max_entries())
|
||||
};
|
||||
let (override_count, overrides_bytes) = {
|
||||
let ov = ctx.overrides.read().unwrap();
|
||||
(ov.active_count(), ov.heap_bytes())
|
||||
};
|
||||
let (bl_stats, blocklist_bytes) = {
|
||||
let bl = ctx.blocklist.read().unwrap();
|
||||
(bl.stats(), bl.heap_bytes())
|
||||
};
|
||||
let (query_log_bytes, query_log_entries) = {
|
||||
let log = ctx.query_log.lock().unwrap();
|
||||
(log.heap_bytes(), log.len())
|
||||
};
|
||||
let (srtt_bytes, srtt_entries, srtt_enabled) = {
|
||||
let s = ctx.srtt.read().unwrap();
|
||||
(s.heap_bytes(), s.len(), s.is_enabled())
|
||||
};
|
||||
|
||||
let total_estimated =
|
||||
cache_bytes + blocklist_bytes + query_log_bytes + srtt_bytes + overrides_bytes;
|
||||
let override_count = ctx.overrides.read().unwrap().active_count();
|
||||
let bl_stats = ctx.blocklist.read().unwrap().stats();
|
||||
|
||||
let upstream = if ctx.upstream_mode == crate::config::UpstreamMode::Recursive {
|
||||
"recursive (root hints)".to_string()
|
||||
@@ -518,16 +489,12 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
Json(StatsResponse {
|
||||
uptime_secs: snap.uptime_secs,
|
||||
upstream,
|
||||
mode: ctx.upstream_mode.as_str(),
|
||||
config_path: ctx.config_path.clone(),
|
||||
data_dir: ctx.data_dir.to_string_lossy().to_string(),
|
||||
dnssec: ctx.dnssec_enabled,
|
||||
srtt: srtt_enabled,
|
||||
queries: QueriesStats {
|
||||
total: snap.total,
|
||||
forwarded: snap.forwarded,
|
||||
recursive: snap.recursive,
|
||||
coalesced: snap.coalesced,
|
||||
cached: snap.cached,
|
||||
local: snap.local,
|
||||
overridden: snap.overridden,
|
||||
@@ -551,17 +518,6 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
enabled: ctx.lan_enabled,
|
||||
peers: ctx.lan_peers.lock().unwrap().list().len(),
|
||||
},
|
||||
memory: MemoryStats {
|
||||
cache_bytes,
|
||||
blocklist_bytes,
|
||||
query_log_bytes,
|
||||
query_log_entries,
|
||||
srtt_bytes,
|
||||
srtt_entries,
|
||||
overrides_bytes,
|
||||
total_estimated_bytes: total_estimated,
|
||||
process_memory_bytes: crate::stats::process_memory_bytes(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -906,7 +862,7 @@ async fn remove_route(
|
||||
}
|
||||
|
||||
async fn serve_ca(State(ctx): State<Arc<ServerCtx>>) -> Result<impl IntoResponse, StatusCode> {
|
||||
let ca_path = ctx.data_dir.join(crate::tls::CA_FILE_NAME);
|
||||
let ca_path = ctx.data_dir.join("ca.pem");
|
||||
let bytes = tokio::task::spawn_blocking(move || std::fs::read(ca_path))
|
||||
.await
|
||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
|
||||
@@ -992,8 +948,6 @@ mod tests {
|
||||
tls_config: None,
|
||||
upstream_mode: crate::config::UpstreamMode::Forward,
|
||||
root_hints: Vec::new(),
|
||||
srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
|
||||
inflight: Mutex::new(std::collections::HashMap::new()),
|
||||
dnssec_enabled: false,
|
||||
dnssec_strict: false,
|
||||
})
|
||||
|
||||
@@ -183,15 +183,6 @@ impl BlocklistStore {
|
||||
self.allowlist.iter().cloned().collect()
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let per_slot_overhead = std::mem::size_of::<u64>() + std::mem::size_of::<String>() + 1;
|
||||
let domains_table = self.domains.capacity() * per_slot_overhead;
|
||||
let domains_heap: usize = self.domains.iter().map(|d| d.capacity()).sum();
|
||||
let allow_table = self.allowlist.capacity() * per_slot_overhead;
|
||||
let allow_heap: usize = self.allowlist.iter().map(|d| d.capacity()).sum();
|
||||
domains_table + domains_heap + allow_table + allow_heap
|
||||
}
|
||||
|
||||
pub fn stats(&self) -> BlocklistStats {
|
||||
BlocklistStats {
|
||||
enabled: self.is_enabled(),
|
||||
@@ -243,23 +234,6 @@ pub fn parse_blocklist(text: &str) -> HashSet<String> {
|
||||
domains
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_domains() {
|
||||
let mut store = BlocklistStore::new();
|
||||
let empty = store.heap_bytes();
|
||||
let domains: HashSet<String> = ["example.com", "example.org", "test.net"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
store.swap_domains(domains, vec![]);
|
||||
assert!(store.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn download_blocklists(lists: &[String]) -> Vec<(String, String)> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(30))
|
||||
|
||||
40
src/cache.rs
40
src/cache.rs
@@ -142,26 +142,6 @@ impl DnsCache {
|
||||
self.entry_count = 0;
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let outer_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<String>()
|
||||
+ std::mem::size_of::<HashMap<QueryType, CacheEntry>>()
|
||||
+ 1;
|
||||
let mut total = self.entries.capacity() * outer_slot;
|
||||
for (domain, type_map) in &self.entries {
|
||||
total += domain.capacity();
|
||||
let inner_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<QueryType>()
|
||||
+ std::mem::size_of::<CacheEntry>()
|
||||
+ 1;
|
||||
total += type_map.capacity() * inner_slot;
|
||||
for entry in type_map.values() {
|
||||
total += entry.packet.heap_bytes();
|
||||
}
|
||||
}
|
||||
total
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, domain: &str) {
|
||||
let domain_lower = domain.to_lowercase();
|
||||
if let Some(type_map) = self.entries.remove(&domain_lower) {
|
||||
@@ -214,23 +194,3 @@ fn adjust_ttls(records: &mut [DnsRecord], new_ttl: u32) {
|
||||
record.set_ttl(new_ttl);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::packet::DnsPacket;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut cache = DnsCache::new(100, 1, 3600);
|
||||
let empty = cache.heap_bytes();
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "1.2.3.4".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
cache.insert("example.com", QueryType::A, &pkt);
|
||||
assert!(cache.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
@@ -29,8 +29,6 @@ pub struct Config {
|
||||
pub lan: LanConfig,
|
||||
#[serde(default)]
|
||||
pub dnssec: DnssecConfig,
|
||||
#[serde(default)]
|
||||
pub dot: DotConfig,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -41,10 +39,6 @@ pub struct ServerConfig {
|
||||
pub api_port: u16,
|
||||
#[serde(default = "default_api_bind_addr")]
|
||||
pub api_bind_addr: String,
|
||||
/// Where numa writes TLS material (CA, leaf certs, regenerated state).
|
||||
/// Defaults to `crate::data_dir()` (platform-specific system path) if unset.
|
||||
#[serde(default)]
|
||||
pub data_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
@@ -53,7 +47,6 @@ impl Default for ServerConfig {
|
||||
bind_addr: default_bind_addr(),
|
||||
api_port: default_api_port(),
|
||||
api_bind_addr: default_api_bind_addr(),
|
||||
data_dir: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -66,31 +59,18 @@ fn default_bind_addr() -> String {
|
||||
"0.0.0.0:53".to_string()
|
||||
}
|
||||
|
||||
pub const DEFAULT_API_PORT: u16 = 5380;
|
||||
|
||||
fn default_api_port() -> u16 {
|
||||
DEFAULT_API_PORT
|
||||
5380
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Default, PartialEq, Eq, Clone, Copy)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum UpstreamMode {
|
||||
Auto,
|
||||
#[default]
|
||||
Forward,
|
||||
Recursive,
|
||||
}
|
||||
|
||||
impl UpstreamMode {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
UpstreamMode::Auto => "auto",
|
||||
UpstreamMode::Forward => "forward",
|
||||
UpstreamMode::Recursive => "recursive",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct UpstreamConfig {
|
||||
#[serde(default)]
|
||||
@@ -105,8 +85,6 @@ pub struct UpstreamConfig {
|
||||
pub root_hints: Vec<String>,
|
||||
#[serde(default = "default_prime_tlds")]
|
||||
pub prime_tlds: Vec<String>,
|
||||
#[serde(default = "default_srtt")]
|
||||
pub srtt: bool,
|
||||
}
|
||||
|
||||
impl Default for UpstreamConfig {
|
||||
@@ -118,19 +96,10 @@ impl Default for UpstreamConfig {
|
||||
timeout_ms: default_timeout_ms(),
|
||||
root_hints: default_root_hints(),
|
||||
prime_tlds: default_prime_tlds(),
|
||||
srtt: default_srtt(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_srtt() -> bool {
|
||||
default_true()
|
||||
}
|
||||
|
||||
fn default_prime_tlds() -> Vec<String> {
|
||||
vec![
|
||||
// gTLDs
|
||||
@@ -377,41 +346,6 @@ pub struct DnssecConfig {
|
||||
pub strict: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone)]
|
||||
pub struct DotConfig {
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
#[serde(default = "default_dot_port")]
|
||||
pub port: u16,
|
||||
#[serde(default = "default_dot_bind_addr")]
|
||||
pub bind_addr: String,
|
||||
/// Path to TLS certificate (PEM). If None, uses self-signed CA.
|
||||
#[serde(default)]
|
||||
pub cert_path: Option<PathBuf>,
|
||||
/// Path to TLS private key (PEM). If None, uses self-signed CA.
|
||||
#[serde(default)]
|
||||
pub key_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Default for DotConfig {
|
||||
fn default() -> Self {
|
||||
DotConfig {
|
||||
enabled: false,
|
||||
port: default_dot_port(),
|
||||
bind_addr: default_dot_bind_addr(),
|
||||
cert_path: None,
|
||||
key_path: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_dot_port() -> u16 {
|
||||
853
|
||||
}
|
||||
fn default_dot_bind_addr() -> String {
|
||||
"0.0.0.0".to_string()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
679
src/ctx.rs
679
src/ctx.rs
@@ -1,4 +1,3 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
@@ -8,9 +7,6 @@ use arc_swap::ArcSwap;
|
||||
use log::{debug, error, info, warn};
|
||||
use rustls::ServerConfig;
|
||||
use tokio::net::UdpSocket;
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
type InflightMap = HashMap<(String, QueryType), broadcast::Sender<Option<DnsPacket>>>;
|
||||
|
||||
use crate::blocklist::BlocklistStore;
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
@@ -25,7 +21,6 @@ use crate::query_log::{QueryLog, QueryLogEntry};
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
use crate::service_store::ServiceStore;
|
||||
use crate::srtt::SrttCache;
|
||||
use crate::stats::{QueryPath, ServerStats};
|
||||
use crate::system_dns::ForwardingRule;
|
||||
|
||||
@@ -56,27 +51,28 @@ pub struct ServerCtx {
|
||||
pub tls_config: Option<ArcSwap<ServerConfig>>,
|
||||
pub upstream_mode: UpstreamMode,
|
||||
pub root_hints: Vec<SocketAddr>,
|
||||
pub srtt: RwLock<SrttCache>,
|
||||
pub inflight: Mutex<InflightMap>,
|
||||
pub dnssec_enabled: bool,
|
||||
pub dnssec_strict: bool,
|
||||
}
|
||||
|
||||
/// Transport-agnostic DNS resolution. Runs the full pipeline (overrides, blocklist,
|
||||
/// cache, upstream, DNSSEC) and returns the serialized response in a buffer.
|
||||
/// Callers use `.filled()` to get the response bytes without heap allocation.
|
||||
/// Callers are responsible for parsing the incoming buffer into a `DnsPacket`
|
||||
/// (and logging parse errors) before calling this function.
|
||||
pub async fn resolve_query(
|
||||
query: DnsPacket,
|
||||
pub async fn handle_query(
|
||||
mut buffer: BytePacketBuffer,
|
||||
src_addr: SocketAddr,
|
||||
ctx: &ServerCtx,
|
||||
) -> crate::Result<BytePacketBuffer> {
|
||||
) -> crate::Result<()> {
|
||||
let start = Instant::now();
|
||||
|
||||
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||
Ok(packet) => packet,
|
||||
Err(e) => {
|
||||
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let (qname, qtype) = match query.questions.first() {
|
||||
Some(q) => (q.name.clone(), q.qtype),
|
||||
None => return Err("empty question section".into()),
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
|
||||
@@ -90,13 +86,18 @@ pub async fn resolve_query(
|
||||
} else if qname == "localhost" || qname.ends_with(".localhost") {
|
||||
// RFC 6761: .localhost always resolves to loopback
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers.push(sinkhole_record(
|
||||
&qname,
|
||||
qtype,
|
||||
std::net::Ipv4Addr::LOCALHOST,
|
||||
std::net::Ipv6Addr::LOCALHOST,
|
||||
300,
|
||||
));
|
||||
match qtype {
|
||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv6Addr::LOCALHOST,
|
||||
ttl: 300,
|
||||
}),
|
||||
_ => resp.answers.push(DnsRecord::A {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv4Addr::LOCALHOST,
|
||||
ttl: 300,
|
||||
}),
|
||||
}
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if is_special_use_domain(&qname) {
|
||||
// RFC 6761/8880: private PTR, DDR, NAT64 — answer locally
|
||||
@@ -105,17 +106,12 @@ pub async fn resolve_query(
|
||||
} else if !ctx.proxy_tld_suffix.is_empty()
|
||||
&& (qname.ends_with(&ctx.proxy_tld_suffix) || qname == ctx.proxy_tld)
|
||||
{
|
||||
// Resolve .numa: remote clients get LAN IP (can't reach 127.0.0.1), local get loopback
|
||||
// Resolve .numa: local services → 127.0.0.1, LAN peers → peer IP
|
||||
let service_name = qname.strip_suffix(&ctx.proxy_tld_suffix).unwrap_or(&qname);
|
||||
let is_remote = !src_addr.ip().is_loopback();
|
||||
let resolve_ip = {
|
||||
let local = ctx.services.lock().unwrap();
|
||||
if local.lookup(service_name).is_some() {
|
||||
if is_remote {
|
||||
*ctx.lan_ip.lock().unwrap()
|
||||
} else {
|
||||
std::net::Ipv4Addr::LOCALHOST
|
||||
}
|
||||
std::net::Ipv4Addr::LOCALHOST
|
||||
} else {
|
||||
let mut peers = ctx.lan_peers.lock().unwrap();
|
||||
peers
|
||||
@@ -127,24 +123,38 @@ pub async fn resolve_query(
|
||||
.unwrap_or(std::net::Ipv4Addr::LOCALHOST)
|
||||
}
|
||||
};
|
||||
let v6 = if resolve_ip == std::net::Ipv4Addr::LOCALHOST {
|
||||
std::net::Ipv6Addr::LOCALHOST
|
||||
} else {
|
||||
resolve_ip.to_ipv6_mapped()
|
||||
};
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers
|
||||
.push(sinkhole_record(&qname, qtype, resolve_ip, v6, 300));
|
||||
match qtype {
|
||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||
domain: qname.clone(),
|
||||
addr: if resolve_ip == std::net::Ipv4Addr::LOCALHOST {
|
||||
std::net::Ipv6Addr::LOCALHOST
|
||||
} else {
|
||||
resolve_ip.to_ipv6_mapped()
|
||||
},
|
||||
ttl: 300,
|
||||
}),
|
||||
_ => resp.answers.push(DnsRecord::A {
|
||||
domain: qname.clone(),
|
||||
addr: resolve_ip,
|
||||
ttl: 300,
|
||||
}),
|
||||
}
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if ctx.blocklist.read().unwrap().is_blocked(&qname) {
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers.push(sinkhole_record(
|
||||
&qname,
|
||||
qtype,
|
||||
std::net::Ipv4Addr::UNSPECIFIED,
|
||||
std::net::Ipv6Addr::UNSPECIFIED,
|
||||
60,
|
||||
));
|
||||
match qtype {
|
||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv6Addr::UNSPECIFIED,
|
||||
ttl: 60,
|
||||
}),
|
||||
_ => resp.answers.push(DnsRecord::A {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv4Addr::UNSPECIFIED,
|
||||
ttl: 60,
|
||||
}),
|
||||
}
|
||||
(resp, QueryPath::Blocked, DnssecStatus::Indeterminate)
|
||||
} else if let Some(records) = ctx.zone_map.get(qname.as_str()).and_then(|m| m.get(&qtype)) {
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
@@ -159,20 +169,20 @@ pub async fn resolve_query(
|
||||
resp.header.authed_data = true;
|
||||
}
|
||||
(resp, QueryPath::Cached, cached_dnssec)
|
||||
} else if let Some(fwd_addr) =
|
||||
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
|
||||
{
|
||||
// Conditional forwarding takes priority over recursive mode
|
||||
// (e.g. Tailscale .ts.net, VPC private zones)
|
||||
let upstream = Upstream::Udp(fwd_addr);
|
||||
match forward_query(&query, &upstream, ctx.timeout).await {
|
||||
Ok(resp) => {
|
||||
ctx.cache.write().unwrap().insert(&qname, qtype, &resp);
|
||||
(resp, QueryPath::Forwarded, DnssecStatus::Indeterminate)
|
||||
}
|
||||
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
||||
match crate::recursive::resolve_recursive(
|
||||
&qname,
|
||||
qtype,
|
||||
&ctx.cache,
|
||||
&query,
|
||||
&ctx.root_hints,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(resp) => (resp, QueryPath::Recursive, DnssecStatus::Indeterminate),
|
||||
Err(e) => {
|
||||
error!(
|
||||
"{} | {:?} {} | FORWARD ERROR | {}",
|
||||
"{} | {:?} {} | RECURSIVE ERROR | {}",
|
||||
src_addr, qtype, qname, e
|
||||
);
|
||||
(
|
||||
@@ -182,31 +192,6 @@ pub async fn resolve_query(
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
||||
let key = (qname.clone(), qtype);
|
||||
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
|
||||
crate::recursive::resolve_recursive(
|
||||
&qname,
|
||||
qtype,
|
||||
&ctx.cache,
|
||||
&query,
|
||||
&ctx.root_hints,
|
||||
&ctx.srtt,
|
||||
)
|
||||
})
|
||||
.await;
|
||||
if path == QueryPath::Coalesced {
|
||||
debug!("{} | {:?} {} | COALESCED", src_addr, qtype, qname);
|
||||
} else if path == QueryPath::UpstreamError {
|
||||
error!(
|
||||
"{} | {:?} {} | RECURSIVE ERROR | {}",
|
||||
src_addr,
|
||||
qtype,
|
||||
qname,
|
||||
err.as_deref().unwrap_or("leader failed")
|
||||
);
|
||||
}
|
||||
(resp, path, DnssecStatus::Indeterminate)
|
||||
} else {
|
||||
let upstream =
|
||||
match crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules) {
|
||||
@@ -241,8 +226,7 @@ pub async fn resolve_query(
|
||||
let mut dnssec = dnssec;
|
||||
if ctx.dnssec_enabled && path == QueryPath::Recursive {
|
||||
let (status, vstats) =
|
||||
crate::dnssec::validate_response(&response, &ctx.cache, &ctx.root_hints, &ctx.srtt)
|
||||
.await;
|
||||
crate::dnssec::validate_response(&response, &ctx.cache, &ctx.root_hints).await;
|
||||
|
||||
debug!(
|
||||
"DNSSEC | {} | {:?} | {}ms | dnskey_hit={} dnskey_fetch={} ds_hit={} ds_fetch={}",
|
||||
@@ -303,17 +287,17 @@ pub async fn resolve_query(
|
||||
response.resources.len(),
|
||||
);
|
||||
|
||||
// Serialize response
|
||||
// TODO: TC bit is UDP-specific; DoT connections could carry up to 65535 bytes.
|
||||
// Once BytePacketBuffer supports larger buffers, skip truncation for TCP/TLS.
|
||||
let mut resp_buffer = BytePacketBuffer::new();
|
||||
if response.write(&mut resp_buffer).is_err() {
|
||||
// Response too large — set TC bit and send header + question only
|
||||
// Response too large for UDP — set TC bit and send header + question only
|
||||
debug!("response too large, setting TC bit for {}", qname);
|
||||
let mut tc_response = DnsPacket::response_from(&query, response.header.rescode);
|
||||
tc_response.header.truncated_message = true;
|
||||
resp_buffer = BytePacketBuffer::new();
|
||||
tc_response.write(&mut resp_buffer)?;
|
||||
let mut tc_buffer = BytePacketBuffer::new();
|
||||
tc_response.write(&mut tc_buffer)?;
|
||||
ctx.socket.send_to(tc_buffer.filled(), src_addr).await?;
|
||||
} else {
|
||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||
}
|
||||
|
||||
// Record stats and query log
|
||||
@@ -336,30 +320,6 @@ pub async fn resolve_query(
|
||||
dnssec,
|
||||
});
|
||||
|
||||
Ok(resp_buffer)
|
||||
}
|
||||
|
||||
/// Handle a DNS query received over UDP. Thin wrapper around resolve_query.
|
||||
pub async fn handle_query(
|
||||
mut buffer: BytePacketBuffer,
|
||||
src_addr: SocketAddr,
|
||||
ctx: &ServerCtx,
|
||||
) -> crate::Result<()> {
|
||||
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||
Ok(packet) => packet,
|
||||
Err(e) => {
|
||||
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match resolve_query(query, src_addr, ctx).await {
|
||||
Ok(resp_buffer) => {
|
||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("{} | RESOLVE ERROR | {}", src_addr, e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -406,110 +366,7 @@ fn is_special_use_domain(qname: &str) -> bool {
|
||||
return true;
|
||||
}
|
||||
// NAT64 (RFC 8880)
|
||||
if qname == "ipv4only.arpa" {
|
||||
return true;
|
||||
}
|
||||
// RFC 6762: .local is reserved for mDNS — never forward to upstream
|
||||
qname == "local" || qname.ends_with(".local")
|
||||
}
|
||||
|
||||
fn sinkhole_record(
|
||||
domain: &str,
|
||||
qtype: QueryType,
|
||||
v4: std::net::Ipv4Addr,
|
||||
v6: std::net::Ipv6Addr,
|
||||
ttl: u32,
|
||||
) -> DnsRecord {
|
||||
match qtype {
|
||||
QueryType::AAAA => DnsRecord::AAAA {
|
||||
domain: domain.to_string(),
|
||||
addr: v6,
|
||||
ttl,
|
||||
},
|
||||
_ => DnsRecord::A {
|
||||
domain: domain.to_string(),
|
||||
addr: v4,
|
||||
ttl,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
enum Disposition {
|
||||
Leader(broadcast::Sender<Option<DnsPacket>>),
|
||||
Follower(broadcast::Receiver<Option<DnsPacket>>),
|
||||
}
|
||||
|
||||
fn acquire_inflight(inflight: &Mutex<InflightMap>, key: (String, QueryType)) -> Disposition {
|
||||
let mut map = inflight.lock().unwrap();
|
||||
if let Some(tx) = map.get(&key) {
|
||||
Disposition::Follower(tx.subscribe())
|
||||
} else {
|
||||
let (tx, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
map.insert(key, tx.clone());
|
||||
Disposition::Leader(tx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a resolve function with in-flight coalescing. Multiple concurrent calls
|
||||
/// for the same key share a single resolution — the first caller (leader)
|
||||
/// executes `resolve_fn`, and followers wait for the broadcast result.
|
||||
async fn resolve_coalesced<F, Fut>(
|
||||
inflight: &Mutex<InflightMap>,
|
||||
key: (String, QueryType),
|
||||
query: &DnsPacket,
|
||||
resolve_fn: F,
|
||||
) -> (DnsPacket, QueryPath, Option<String>)
|
||||
where
|
||||
F: FnOnce() -> Fut,
|
||||
Fut: std::future::Future<Output = crate::Result<DnsPacket>>,
|
||||
{
|
||||
let disposition = acquire_inflight(inflight, key.clone());
|
||||
|
||||
match disposition {
|
||||
Disposition::Follower(mut rx) => match rx.recv().await {
|
||||
Ok(Some(mut resp)) => {
|
||||
resp.header.id = query.header.id;
|
||||
(resp, QueryPath::Coalesced, None)
|
||||
}
|
||||
_ => (
|
||||
DnsPacket::response_from(query, ResultCode::SERVFAIL),
|
||||
QueryPath::UpstreamError,
|
||||
None,
|
||||
),
|
||||
},
|
||||
Disposition::Leader(tx) => {
|
||||
let guard = InflightGuard { inflight, key };
|
||||
let result = resolve_fn().await;
|
||||
drop(guard);
|
||||
|
||||
match result {
|
||||
Ok(resp) => {
|
||||
let _ = tx.send(Some(resp.clone()));
|
||||
(resp, QueryPath::Recursive, None)
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = tx.send(None);
|
||||
let err_msg = e.to_string();
|
||||
(
|
||||
DnsPacket::response_from(query, ResultCode::SERVFAIL),
|
||||
QueryPath::UpstreamError,
|
||||
Some(err_msg),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct InflightGuard<'a> {
|
||||
inflight: &'a Mutex<InflightMap>,
|
||||
key: (String, QueryType),
|
||||
}
|
||||
|
||||
impl Drop for InflightGuard<'_> {
|
||||
fn drop(&mut self) {
|
||||
self.inflight.lock().unwrap().remove(&self.key);
|
||||
}
|
||||
qname == "ipv4only.arpa"
|
||||
}
|
||||
|
||||
fn special_use_response(query: &DnsPacket, qname: &str, qtype: QueryType) -> DnsPacket {
|
||||
@@ -545,391 +402,3 @@ fn special_use_response(query: &DnsPacket, qname: &str, qtype: QueryType) -> Dns
|
||||
DnsPacket::response_from(query, ResultCode::NXDOMAIN)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashMap;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
// ---- InflightGuard unit tests ----
|
||||
|
||||
#[test]
|
||||
fn inflight_guard_removes_key_on_drop() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key = ("example.com".to_string(), QueryType::A);
|
||||
let (tx, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
map.lock().unwrap().insert(key.clone(), tx);
|
||||
|
||||
assert_eq!(map.lock().unwrap().len(), 1);
|
||||
{
|
||||
let _guard = InflightGuard {
|
||||
inflight: &map,
|
||||
key: key.clone(),
|
||||
};
|
||||
} // guard dropped here
|
||||
assert!(map.lock().unwrap().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn inflight_guard_only_removes_own_key() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key_a = ("a.com".to_string(), QueryType::A);
|
||||
let key_b = ("b.com".to_string(), QueryType::A);
|
||||
let (tx_a, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
let (tx_b, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
map.lock().unwrap().insert(key_a.clone(), tx_a);
|
||||
map.lock().unwrap().insert(key_b.clone(), tx_b);
|
||||
|
||||
{
|
||||
let _guard = InflightGuard {
|
||||
inflight: &map,
|
||||
key: key_a,
|
||||
};
|
||||
}
|
||||
let m = map.lock().unwrap();
|
||||
assert_eq!(m.len(), 1);
|
||||
assert!(m.contains_key(&key_b));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn inflight_guard_same_domain_different_qtype_independent() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key_a = ("example.com".to_string(), QueryType::A);
|
||||
let key_aaaa = ("example.com".to_string(), QueryType::AAAA);
|
||||
let (tx_a, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
let (tx_aaaa, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
map.lock().unwrap().insert(key_a.clone(), tx_a);
|
||||
map.lock().unwrap().insert(key_aaaa.clone(), tx_aaaa);
|
||||
|
||||
{
|
||||
let _guard = InflightGuard {
|
||||
inflight: &map,
|
||||
key: key_a,
|
||||
};
|
||||
}
|
||||
let m = map.lock().unwrap();
|
||||
assert_eq!(m.len(), 1);
|
||||
assert!(m.contains_key(&key_aaaa));
|
||||
}
|
||||
|
||||
// ---- Coalescing disposition tests (via acquire_inflight) ----
|
||||
|
||||
#[test]
|
||||
fn first_caller_becomes_leader() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key = ("test.com".to_string(), QueryType::A);
|
||||
|
||||
let d = acquire_inflight(&map, key.clone());
|
||||
assert!(matches!(d, Disposition::Leader(_)));
|
||||
assert_eq!(map.lock().unwrap().len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn second_caller_becomes_follower() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key = ("test.com".to_string(), QueryType::A);
|
||||
|
||||
let _leader = acquire_inflight(&map, key.clone());
|
||||
let follower = acquire_inflight(&map, key);
|
||||
assert!(matches!(follower, Disposition::Follower(_)));
|
||||
// Map still has exactly 1 entry — follower subscribes, doesn't insert
|
||||
assert_eq!(map.lock().unwrap().len(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn leader_broadcast_reaches_follower() {
|
||||
let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
|
||||
let key = ("test.com".to_string(), QueryType::A);
|
||||
|
||||
let leader = acquire_inflight(&map, key.clone());
|
||||
let follower = acquire_inflight(&map, key);
|
||||
|
||||
let tx = match leader {
|
||||
Disposition::Leader(tx) => tx,
|
||||
_ => panic!("expected leader"),
|
||||
};
|
||||
let mut rx = match follower {
|
||||
Disposition::Follower(rx) => rx,
|
||||
_ => panic!("expected follower"),
|
||||
};
|
||||
|
||||
let mut resp = DnsPacket::new();
|
||||
resp.header.id = 42;
|
||||
resp.answers.push(DnsRecord::A {
|
||||
domain: "test.com".into(),
|
||||
addr: Ipv4Addr::new(1, 2, 3, 4),
|
||||
ttl: 300,
|
||||
});
|
||||
let _ = tx.send(Some(resp));
|
||||
|
||||
let received = rx.recv().await.unwrap().unwrap();
|
||||
assert_eq!(received.header.id, 42);
|
||||
assert_eq!(received.answers.len(), 1);
|
||||
}
|
||||
|
||||
/// Broadcasting `None` is the leader's failure signal; followers must
/// observe it as `None` (and later synthesize SERVFAIL themselves).
#[tokio::test]
async fn leader_none_signals_failure_to_follower() {
    let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
    let key = ("test.com".to_string(), QueryType::A);

    let leader = acquire_inflight(&map, key.clone());
    let follower = acquire_inflight(&map, key);

    let tx = match leader {
        Disposition::Leader(tx) => tx,
        _ => panic!("expected leader"),
    };
    let mut rx = match follower {
        Disposition::Follower(rx) => rx,
        _ => panic!("expected follower"),
    };

    // None = "resolution failed"; recv still succeeds at the channel level.
    let _ = tx.send(None);
    assert!(rx.recv().await.unwrap().is_none());
}
|
||||
|
||||
/// One leader plus three followers on the same key: a single broadcast
/// must be delivered to every follower (broadcast, not queue, semantics).
#[tokio::test]
async fn multiple_followers_all_receive_via_acquire() {
    let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
    let key = ("multi.com".to_string(), QueryType::A);

    let leader = acquire_inflight(&map, key.clone());
    let f1 = acquire_inflight(&map, key.clone());
    let f2 = acquire_inflight(&map, key.clone());
    let f3 = acquire_inflight(&map, key);

    let tx = match leader {
        Disposition::Leader(tx) => tx,
        _ => panic!("expected leader"),
    };

    let mut resp = DnsPacket::new();
    resp.answers.push(DnsRecord::A {
        domain: "multi.com".into(),
        addr: Ipv4Addr::new(10, 0, 0, 1),
        ttl: 60,
    });
    // Single send; each subscribed receiver gets its own copy.
    let _ = tx.send(Some(resp));

    for f in [f1, f2, f3] {
        let mut rx = match f {
            Disposition::Follower(rx) => rx,
            _ => panic!("expected follower"),
        };
        let r = rx.recv().await.unwrap().unwrap();
        assert_eq!(r.answers.len(), 1);
    }
}
|
||||
|
||||
// ---- Integration: resolve_coalesced with mock futures ----
|
||||
|
||||
fn mock_response(domain: &str) -> DnsPacket {
|
||||
let mut resp = DnsPacket::new();
|
||||
resp.header.response = true;
|
||||
resp.header.rescode = ResultCode::NOERROR;
|
||||
resp.answers.push(DnsRecord::A {
|
||||
domain: domain.to_string(),
|
||||
addr: Ipv4Addr::new(10, 0, 0, 1),
|
||||
ttl: 300,
|
||||
});
|
||||
resp
|
||||
}
|
||||
|
||||
/// Five concurrent queries for the same (domain, qtype) must collapse to a
/// single upstream resolution: one Leader runs the closure, four Followers
/// wait on the broadcast, and the inflight map is empty afterwards.
#[tokio::test]
async fn concurrent_queries_coalesce_to_single_resolution() {
    let inflight = Arc::new(Mutex::new(HashMap::new()));
    // Counts how many times the resolve closure actually executes.
    let resolve_count = Arc::new(std::sync::atomic::AtomicU32::new(0));

    let mut handles = Vec::new();
    for i in 0..5u16 {
        let count = resolve_count.clone();
        let inf = inflight.clone();
        let key = ("coalesce.test".to_string(), QueryType::A);
        let query = DnsPacket::query(100 + i, "coalesce.test", QueryType::A);
        handles.push(tokio::spawn(async move {
            resolve_coalesced(&inf, key, &query, || async {
                count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                // Sleep so all five tasks overlap while the leader is in flight.
                tokio::time::sleep(Duration::from_millis(200)).await;
                Ok(mock_response("coalesce.test"))
            })
            .await
        }));
    }

    let mut paths = Vec::new();
    for h in handles {
        let (_, path, _) = h.await.unwrap();
        paths.push(path);
    }

    let actual = resolve_count.load(std::sync::atomic::Ordering::Relaxed);
    assert_eq!(actual, 1, "expected 1 resolution, got {}", actual);

    // Exactly one task took the recursive path; the rest were coalesced.
    let recursive = paths.iter().filter(|p| **p == QueryPath::Recursive).count();
    let coalesced = paths.iter().filter(|p| **p == QueryPath::Coalesced).count();
    assert_eq!(recursive, 1, "expected 1 RECURSIVE, got {}", recursive);
    assert_eq!(coalesced, 4, "expected 4 COALESCED, got {}", coalesced);

    // Completed work must be removed from the inflight map.
    assert!(inflight.lock().unwrap().is_empty());
}
|
||||
|
||||
/// Same domain but different query types (A vs AAAA) are distinct keys and
/// must NOT coalesce: both resolve independently as leaders.
#[tokio::test]
async fn different_qtypes_not_coalesced() {
    let inflight = Arc::new(Mutex::new(HashMap::new()));
    let resolve_count = Arc::new(std::sync::atomic::AtomicU32::new(0));

    let inf1 = inflight.clone();
    let inf2 = inflight.clone();
    let count1 = resolve_count.clone();
    let count2 = resolve_count.clone();

    let query_a = DnsPacket::query(200, "same.domain", QueryType::A);
    let query_aaaa = DnsPacket::query(201, "same.domain", QueryType::AAAA);

    let h1 = tokio::spawn(async move {
        resolve_coalesced(
            &inf1,
            ("same.domain".to_string(), QueryType::A),
            &query_a,
            || async {
                count1.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                tokio::time::sleep(Duration::from_millis(100)).await;
                Ok(mock_response("same.domain"))
            },
        )
        .await
    });
    let h2 = tokio::spawn(async move {
        resolve_coalesced(
            &inf2,
            ("same.domain".to_string(), QueryType::AAAA),
            &query_aaaa,
            || async {
                count2.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                tokio::time::sleep(Duration::from_millis(100)).await;
                Ok(mock_response("same.domain"))
            },
        )
        .await
    });

    let (_, path1, _) = h1.await.unwrap();
    let (_, path2, _) = h2.await.unwrap();

    // Two independent resolutions — one per qtype.
    let actual = resolve_count.load(std::sync::atomic::Ordering::Relaxed);
    assert_eq!(actual, 2, "A and AAAA should each resolve, got {}", actual);
    assert_eq!(path1, QueryPath::Recursive);
    assert_eq!(path2, QueryPath::Recursive);

    // Both entries cleaned up on completion.
    assert!(inflight.lock().unwrap().is_empty());
}
|
||||
|
||||
/// An upstream error must not leak the inflight entry: the map is empty
/// after resolve_coalesced returns, and the path reflects the failure.
#[tokio::test]
async fn inflight_map_cleaned_after_error() {
    let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
    let query = DnsPacket::query(300, "will-fail.test", QueryType::A);

    let (_, path, _) = resolve_coalesced(
        &inflight,
        ("will-fail.test".to_string(), QueryType::A),
        &query,
        // Closure fails immediately, simulating an upstream timeout.
        || async { Err::<DnsPacket, _>("upstream timeout".into()) },
    )
    .await;

    assert_eq!(path, QueryPath::UpstreamError);
    assert!(inflight.lock().unwrap().is_empty());
}
|
||||
|
||||
/// When the leader's resolution fails, every coalesced caller (leader and
/// followers alike) must get a SERVFAIL that echoes the question section,
/// and all paths must report UpstreamError.
#[tokio::test]
async fn follower_gets_servfail_when_leader_fails() {
    let inflight = Arc::new(Mutex::new(HashMap::new()));

    let mut handles = Vec::new();
    for i in 0..3u16 {
        let inf = inflight.clone();
        let query = DnsPacket::query(400 + i, "fail.test", QueryType::A);
        handles.push(tokio::spawn(async move {
            resolve_coalesced(
                &inf,
                ("fail.test".to_string(), QueryType::A),
                &query,
                || async {
                    // Delay so all three tasks overlap before the failure.
                    tokio::time::sleep(Duration::from_millis(200)).await;
                    Err::<DnsPacket, _>("upstream error".into())
                },
            )
            .await
        }));
    }

    let mut paths = Vec::new();
    for h in handles {
        let (resp, path, _) = h.await.unwrap();
        assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
        assert_eq!(
            resp.questions.len(),
            1,
            "SERVFAIL must echo question section"
        );
        assert_eq!(resp.questions[0].name, "fail.test");
        paths.push(path);
    }

    let errors = paths
        .iter()
        .filter(|p| **p == QueryPath::UpstreamError)
        .count();
    assert_eq!(errors, 3, "all 3 should be UpstreamError, got {}", errors);

    // Failed entry removed from the inflight map.
    assert!(inflight.lock().unwrap().is_empty());
}
|
||||
|
||||
/// The leader's own SERVFAIL must echo the original question (name, qtype)
/// and the original query id — clients match responses on these fields.
#[tokio::test]
async fn servfail_leader_includes_question_section() {
    let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
    let query = DnsPacket::query(500, "question.test", QueryType::A);

    let (resp, _, _) = resolve_coalesced(
        &inflight,
        ("question.test".to_string(), QueryType::A),
        &query,
        || async { Err::<DnsPacket, _>("fail".into()) },
    )
    .await;

    assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
    assert_eq!(
        resp.questions.len(),
        1,
        "SERVFAIL must echo question section"
    );
    assert_eq!(resp.questions[0].name, "question.test");
    assert_eq!(resp.questions[0].qtype, QueryType::A);
    assert_eq!(resp.header.id, 500);
}
|
||||
|
||||
/// The upstream error string must survive through resolve_coalesced so the
/// caller can log the real failure reason, not just "SERVFAIL".
#[tokio::test]
async fn leader_error_preserves_message() {
    let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
    let query = DnsPacket::query(700, "err-msg.test", QueryType::A);

    let (_, path, err) = resolve_coalesced(
        &inflight,
        ("err-msg.test".to_string(), QueryType::A),
        &query,
        || async { Err::<DnsPacket, _>("connection refused by upstream".into()) },
    )
    .await;

    assert_eq!(path, QueryPath::UpstreamError);
    assert_eq!(
        err.as_deref(),
        Some("connection refused by upstream"),
        "error message must be preserved for logging"
    );
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ use crate::cache::{DnsCache, DnssecStatus};
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
use crate::srtt::SrttCache;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ValidationStats {
|
||||
@@ -65,7 +64,6 @@ pub async fn validate_response(
|
||||
response: &DnsPacket,
|
||||
cache: &RwLock<DnsCache>,
|
||||
root_hints: &[std::net::SocketAddr],
|
||||
srtt: &RwLock<SrttCache>,
|
||||
) -> (DnssecStatus, ValidationStats) {
|
||||
let start = Instant::now();
|
||||
let stats = Mutex::new(ValidationStats::default());
|
||||
@@ -97,7 +95,7 @@ pub async fn validate_response(
|
||||
}
|
||||
}
|
||||
for zone in &signer_zones {
|
||||
fetch_dnskeys(zone, cache, root_hints, srtt, &stats).await;
|
||||
fetch_dnskeys(zone, cache, root_hints, &stats).await;
|
||||
}
|
||||
|
||||
// Group answer records into RRsets (by domain + type, excluding RRSIGs)
|
||||
@@ -134,8 +132,7 @@ pub async fn validate_response(
|
||||
..
|
||||
} = rrsig
|
||||
{
|
||||
let dnskey_response =
|
||||
fetch_dnskeys(signer_name, cache, root_hints, srtt, &stats).await;
|
||||
let dnskey_response = fetch_dnskeys(signer_name, cache, root_hints, &stats).await;
|
||||
let dnskeys: Vec<&DnsRecord> = dnskey_response
|
||||
.iter()
|
||||
.filter(|r| matches!(r, DnsRecord::DNSKEY { .. }))
|
||||
@@ -209,7 +206,6 @@ pub async fn validate_response(
|
||||
&dnskey_response,
|
||||
cache,
|
||||
root_hints,
|
||||
srtt,
|
||||
trust_anchors,
|
||||
0,
|
||||
&stats,
|
||||
@@ -280,13 +276,11 @@ pub async fn validate_response(
|
||||
|
||||
/// Walk the chain of trust from zone DNSKEY up to root trust anchor.
|
||||
/// `zone_records` contains both DNSKEY and RRSIG records from the DNSKEY response.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn validate_chain<'a>(
|
||||
zone: &'a str,
|
||||
zone_records: &'a [DnsRecord],
|
||||
cache: &'a RwLock<DnsCache>,
|
||||
root_hints: &'a [std::net::SocketAddr],
|
||||
srtt: &'a RwLock<SrttCache>,
|
||||
trust_anchors: &'a [DnsRecord],
|
||||
depth: u8,
|
||||
stats: &'a Mutex<ValidationStats>,
|
||||
@@ -349,7 +343,7 @@ fn validate_chain<'a>(
|
||||
return DnssecStatus::Indeterminate;
|
||||
}
|
||||
let parent = parent_zone(zone);
|
||||
let ds_records = fetch_ds(zone, cache, root_hints, srtt, stats).await;
|
||||
let ds_records = fetch_ds(zone, cache, root_hints, stats).await;
|
||||
|
||||
if ds_records.is_empty() {
|
||||
debug!("dnssec: no DS for zone '{}' at parent '{}'", zone, parent);
|
||||
@@ -383,7 +377,7 @@ fn validate_chain<'a>(
|
||||
|
||||
// Walk up: validate the parent's DNSKEY
|
||||
trace!("dnssec: fetching parent DNSKEY for '{}'", parent);
|
||||
let parent_records = fetch_dnskeys(&parent, cache, root_hints, srtt, stats).await;
|
||||
let parent_records = fetch_dnskeys(&parent, cache, root_hints, stats).await;
|
||||
if parent_records.is_empty() {
|
||||
debug!("dnssec: no parent DNSKEY for '{}' — Indeterminate", parent);
|
||||
return DnssecStatus::Indeterminate;
|
||||
@@ -394,7 +388,6 @@ fn validate_chain<'a>(
|
||||
&parent_records,
|
||||
cache,
|
||||
root_hints,
|
||||
srtt,
|
||||
trust_anchors,
|
||||
depth + 1,
|
||||
stats,
|
||||
@@ -467,7 +460,6 @@ async fn fetch_dnskeys(
|
||||
zone: &str,
|
||||
cache: &RwLock<DnsCache>,
|
||||
root_hints: &[std::net::SocketAddr],
|
||||
srtt: &RwLock<SrttCache>,
|
||||
stats: &Mutex<ValidationStats>,
|
||||
) -> Vec<DnsRecord> {
|
||||
if let Some(pkt) = cache.read().unwrap().lookup(zone, QueryType::DNSKEY) {
|
||||
@@ -483,8 +475,7 @@ async fn fetch_dnskeys(
|
||||
trace!("dnssec: fetch_dnskeys('{}') cache miss — resolving", zone);
|
||||
stats.lock().unwrap().dnskey_fetches += 1;
|
||||
if let Ok(pkt) =
|
||||
crate::recursive::resolve_iterative(zone, QueryType::DNSKEY, cache, root_hints, srtt, 0, 0)
|
||||
.await
|
||||
crate::recursive::resolve_iterative(zone, QueryType::DNSKEY, cache, root_hints, 0, 0).await
|
||||
{
|
||||
cache.write().unwrap().insert(zone, QueryType::DNSKEY, &pkt);
|
||||
return pkt.answers;
|
||||
@@ -497,7 +488,6 @@ async fn fetch_ds(
|
||||
child: &str,
|
||||
cache: &RwLock<DnsCache>,
|
||||
root_hints: &[std::net::SocketAddr],
|
||||
srtt: &RwLock<SrttCache>,
|
||||
stats: &Mutex<ValidationStats>,
|
||||
) -> Vec<DnsRecord> {
|
||||
if let Some(pkt) = cache.read().unwrap().lookup(child, QueryType::DS) {
|
||||
@@ -511,8 +501,7 @@ async fn fetch_ds(
|
||||
|
||||
stats.lock().unwrap().ds_fetches += 1;
|
||||
if let Ok(pkt) =
|
||||
crate::recursive::resolve_iterative(child, QueryType::DS, cache, root_hints, srtt, 0, 0)
|
||||
.await
|
||||
crate::recursive::resolve_iterative(child, QueryType::DS, cache, root_hints, 0, 0).await
|
||||
{
|
||||
cache.write().unwrap().insert(child, QueryType::DS, &pkt);
|
||||
return pkt
|
||||
|
||||
542
src/dot.rs
542
src/dot.rs
@@ -1,542 +0,0 @@
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use log::{debug, error, info, warn};
|
||||
use rustls::ServerConfig;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::config::DotConfig;
|
||||
use crate::ctx::{resolve_query, ServerCtx};
|
||||
use crate::header::ResultCode;
|
||||
use crate::packet::DnsPacket;
|
||||
|
||||
// Connection-handling limits for the DoT listener.
const MAX_CONNECTIONS: usize = 512; // semaphore permits: max concurrent TLS connections
const IDLE_TIMEOUT: Duration = Duration::from_secs(30); // per-read idle cutoff on a persistent connection
const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10); // TLS handshake deadline
const WRITE_TIMEOUT: Duration = Duration::from_secs(10); // response write deadline (stalled readers)
// Matches BytePacketBuffer::BUF_SIZE — RFC 7858 allows up to 65535 but our
// buffer would silently truncate anything larger.
const MAX_MSG_LEN: usize = 4096;
|
||||
|
||||
/// ALPN protocol list advertised for DNS-over-TLS: the single entry "dot".
fn dot_alpn() -> Vec<Vec<u8>> {
    let proto: &[u8] = b"dot";
    vec![proto.to_vec()]
}
|
||||
|
||||
/// Build a TLS ServerConfig for DoT from user-provided cert/key PEM files.
///
/// Returns an error if either file is unreadable, a PEM entry is malformed,
/// or the key file contains no private key.
fn load_tls_config(cert_path: &Path, key_path: &Path) -> crate::Result<Arc<ServerConfig>> {
    // rustls needs a CryptoProvider installed before ServerConfig::builder().
    // The proxy's build_tls_config also does this; we repeat it here because
    // running DoT with user-provided certs while the proxy is disabled would
    // otherwise panic on first handshake (no default provider).
    let _ = rustls::crypto::ring::default_provider().install_default();

    let cert_pem = std::fs::read(cert_path)?;
    let key_pem = std::fs::read(key_path)?;

    // Collect the whole chain; any parse error aborts with that error.
    let certs: Vec<_> = rustls_pemfile::certs(&mut &cert_pem[..]).collect::<Result<_, _>>()?;
    let key = rustls_pemfile::private_key(&mut &key_pem[..])?
        .ok_or("no private key found in key file")?;

    let mut config = ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;
    // Advertise "dot" ALPN so clients can confirm protocol intent.
    config.alpn_protocols = dot_alpn();

    Ok(Arc::new(config))
}
|
||||
|
||||
/// Build a self-signed DoT TLS config. Can't reuse `ctx.tls_config` (the
/// proxy's shared config) because DoT needs its own ALPN advertisement.
///
/// Pass `proxy_tld` itself as a service name so the cert gets an explicit
/// `{tld}.{tld}` SAN (e.g. "numa.numa") matching the ServerName that
/// setup-phone's mobileconfig sends as SNI. The `*.{tld}` wildcard alone
/// is rejected by strict TLS clients under single-label TLDs (per the
/// note in tls.rs::generate_service_cert).
///
/// Returns `None` (with a warning) if cert generation fails; the caller
/// treats that as "DoT disabled" rather than a fatal error.
fn self_signed_tls(ctx: &ServerCtx) -> Option<Arc<ServerConfig>> {
    let service_names = [ctx.proxy_tld.clone()];
    match crate::tls::build_tls_config(&ctx.proxy_tld, &service_names, dot_alpn(), &ctx.data_dir) {
        Ok(cfg) => Some(cfg),
        Err(e) => {
            warn!(
                "DoT: failed to generate self-signed TLS: {} — DoT disabled",
                e
            );
            None
        }
    }
}
|
||||
|
||||
/// Start the DNS-over-TLS listener (RFC 7858).
///
/// TLS material comes from user-supplied cert/key paths when both are
/// configured, otherwise from a generated self-signed config. Any setup
/// failure (bad certs, bind error) logs a warning and disables DoT rather
/// than aborting the server.
pub async fn start_dot(ctx: Arc<ServerCtx>, config: &DotConfig) {
    let tls_config = match (&config.cert_path, &config.key_path) {
        // Both paths present → user-provided certificate.
        (Some(cert), Some(key)) => match load_tls_config(cert, key) {
            Ok(cfg) => cfg,
            Err(e) => {
                warn!("DoT: failed to load TLS cert/key: {} — DoT disabled", e);
                return;
            }
        },
        // Anything else (including only one path set) → self-signed fallback.
        _ => match self_signed_tls(&ctx) {
            Some(cfg) => cfg,
            None => return,
        },
    };

    // Unparseable bind_addr falls back to 0.0.0.0.
    let bind_addr: IpAddr = config
        .bind_addr
        .parse()
        .unwrap_or(IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED));
    let addr = SocketAddr::new(bind_addr, config.port);
    let listener = match TcpListener::bind(addr).await {
        Ok(l) => l,
        Err(e) => {
            warn!("DoT: could not bind {} ({}) — DoT disabled", addr, e);
            return;
        }
    };
    info!("DoT listening on {}", addr);

    accept_loop(listener, TlsAcceptor::from(tls_config), ctx).await;
}
|
||||
|
||||
/// Accept TCP connections forever, TLS-handshake each one, and spawn a task
/// per connection. Concurrency is capped at MAX_CONNECTIONS via a semaphore;
/// connections over the cap are dropped immediately.
async fn accept_loop(listener: TcpListener, acceptor: TlsAcceptor, ctx: Arc<ServerCtx>) {
    let semaphore = Arc::new(Semaphore::new(MAX_CONNECTIONS));

    loop {
        let (tcp_stream, remote_addr) = match listener.accept().await {
            Ok(conn) => conn,
            Err(e) => {
                error!("DoT: TCP accept error: {}", e);
                // Back off to avoid tight-looping on persistent failures (e.g. fd exhaustion).
                tokio::time::sleep(Duration::from_millis(100)).await;
                continue;
            }
        };

        // Non-blocking acquire: at the cap we drop the connection rather
        // than queue it (the client will retry or fall back).
        let permit = match semaphore.clone().try_acquire_owned() {
            Ok(p) => p,
            Err(_) => {
                debug!("DoT: connection limit reached, rejecting {}", remote_addr);
                continue;
            }
        };
        let acceptor = acceptor.clone();
        let ctx = Arc::clone(&ctx);

        tokio::spawn(async move {
            let _permit = permit; // held until task exits

            // Bound the TLS handshake so slow/stuck clients can't pin a slot.
            let tls_stream =
                match tokio::time::timeout(HANDSHAKE_TIMEOUT, acceptor.accept(tcp_stream)).await {
                    Ok(Ok(s)) => s,
                    Ok(Err(e)) => {
                        debug!("DoT: TLS handshake failed from {}: {}", remote_addr, e);
                        return;
                    }
                    Err(_) => {
                        debug!("DoT: TLS handshake timeout from {}", remote_addr);
                        return;
                    }
                };

            handle_dot_connection(tls_stream, remote_addr, &ctx).await;
        });
    }
}
|
||||
|
||||
/// Handle a single persistent DoT connection (RFC 7858).
/// Reads length-prefixed DNS queries until EOF, idle timeout, or error.
///
/// Per message: read the 2-byte length, reject oversized frames, read the
/// payload, parse it, resolve it, and write back a framed response. Parse
/// failures answer FORMERR; resolve failures answer SERVFAIL. Any write
/// failure or timeout tears the connection down (breaks the loop).
async fn handle_dot_connection<S>(mut stream: S, remote_addr: SocketAddr, ctx: &ServerCtx)
where
    S: AsyncReadExt + AsyncWriteExt + Unpin,
{
    loop {
        // Read 2-byte length prefix (RFC 1035 §4.2.2) with idle timeout
        let mut len_buf = [0u8; 2];
        let Ok(Ok(_)) = tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut len_buf)).await
        else {
            break;
        };
        let msg_len = u16::from_be_bytes(len_buf) as usize;
        if msg_len > MAX_MSG_LEN {
            debug!("DoT: oversized message {} from {}", msg_len, remote_addr);
            break;
        }

        // Read exactly msg_len bytes of DNS payload into our fixed buffer.
        let mut buffer = BytePacketBuffer::new();
        let Ok(Ok(_)) =
            tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut buffer.buf[..msg_len])).await
        else {
            break;
        };

        // Parse query up-front so we can echo its question section in SERVFAIL
        // responses when resolve_query fails.
        let query = match DnsPacket::from_buffer(&mut buffer) {
            Ok(q) => q,
            Err(e) => {
                warn!("{} | PARSE ERROR | {}", remote_addr, e);
                // BytePacketBuffer is zero-initialized, so buf[0..2] reads as 0x0000
                // for sub-2-byte messages — harmless FORMERR with id=0.
                let query_id = u16::from_be_bytes([buffer.buf[0], buffer.buf[1]]);
                let mut resp = DnsPacket::new();
                resp.header.id = query_id;
                resp.header.response = true;
                resp.header.rescode = ResultCode::FORMERR;
                if send_response(&mut stream, &resp, remote_addr)
                    .await
                    .is_err()
                {
                    break;
                }
                // Malformed frame handled; keep the connection open.
                continue;
            }
        };

        match resolve_query(query.clone(), remote_addr, ctx).await {
            Ok(resp_buffer) => {
                // resolve_query already serialized the response; frame and send.
                if write_framed(&mut stream, resp_buffer.filled())
                    .await
                    .is_err()
                {
                    break;
                }
            }
            Err(e) => {
                warn!("{} | RESOLVE ERROR | {}", remote_addr, e);
                // SERVFAIL that echoes the original question section.
                let resp = DnsPacket::response_from(&query, ResultCode::SERVFAIL);
                if send_response(&mut stream, &resp, remote_addr)
                    .await
                    .is_err()
                {
                    break;
                }
            }
        }
    }
}
|
||||
|
||||
/// Serialize a DNS response and send it framed. Logs serialization failures
|
||||
/// and returns Err so the caller can tear down the connection.
|
||||
async fn send_response<S>(
|
||||
stream: &mut S,
|
||||
resp: &DnsPacket,
|
||||
remote_addr: SocketAddr,
|
||||
) -> std::io::Result<()>
|
||||
where
|
||||
S: AsyncWriteExt + Unpin,
|
||||
{
|
||||
let mut out_buf = BytePacketBuffer::new();
|
||||
if resp.write(&mut out_buf).is_err() {
|
||||
debug!(
|
||||
"DoT: failed to serialize {:?} response for {}",
|
||||
resp.header.rescode, remote_addr
|
||||
);
|
||||
return Err(std::io::Error::other("serialize failed"));
|
||||
}
|
||||
write_framed(stream, out_buf.filled()).await
|
||||
}
|
||||
|
||||
/// Write a DNS message with its 2-byte length prefix, coalesced into one syscall.
|
||||
/// Bounded by WRITE_TIMEOUT so a stalled reader can't indefinitely hold a worker.
|
||||
async fn write_framed<S>(stream: &mut S, msg: &[u8]) -> std::io::Result<()>
|
||||
where
|
||||
S: AsyncWriteExt + Unpin,
|
||||
{
|
||||
let mut out = Vec::with_capacity(2 + msg.len());
|
||||
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||
out.extend_from_slice(msg);
|
||||
match tokio::time::timeout(WRITE_TIMEOUT, async {
|
||||
stream.write_all(&out).await?;
|
||||
stream.flush().await
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(_) => Err(std::io::Error::other("write timeout")),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
|
||||
use rcgen::{CertificateParams, DnType, KeyPair};
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer, ServerName};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::header::ResultCode;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
/// Generate a self-signed DoT server config and return its leaf cert DER
/// so callers can build matching client configs with arbitrary ALPN.
fn test_tls_configs() -> (Arc<ServerConfig>, CertificateDer<'static>) {
    // Idempotent; a prior install by another test is fine.
    let _ = rustls::crypto::ring::default_provider().install_default();

    // Mirror production self_signed_tls SAN shape: *.numa wildcard plus
    // explicit numa.numa apex (the ServerName setup-phone uses as SNI).
    let key_pair = KeyPair::generate().unwrap();
    let mut params = CertificateParams::default();
    params
        .distinguished_name
        .push(DnType::CommonName, "Numa .numa services");
    params.subject_alt_names = vec![
        rcgen::SanType::DnsName("*.numa".try_into().unwrap()),
        rcgen::SanType::DnsName("numa.numa".try_into().unwrap()),
    ];
    let cert = params.self_signed(&key_pair).unwrap();

    let cert_der = CertificateDer::from(cert.der().to_vec());
    let key_der = PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key_pair.serialize_der()));

    let mut server_config = ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(vec![cert_der.clone()], key_der)
        .unwrap();
    // Same ALPN the production listener advertises.
    server_config.alpn_protocols = dot_alpn();

    (Arc::new(server_config), cert_der)
}
|
||||
|
||||
/// Build a TLS client config that trusts `cert_der` and advertises the
/// given ALPN protocols. Used by tests to vary ALPN per test case.
fn dot_client(
    cert_der: &CertificateDer<'static>,
    alpn: Vec<Vec<u8>>,
) -> Arc<rustls::ClientConfig> {
    // Trust only the test server's own leaf certificate.
    let mut root_store = rustls::RootCertStore::empty();
    root_store.add(cert_der.clone()).unwrap();
    let mut config = rustls::ClientConfig::builder()
        .with_root_certificates(root_store)
        .with_no_client_auth();
    config.alpn_protocols = alpn;
    Arc::new(config)
}
|
||||
|
||||
/// Spin up a DoT listener with a test TLS config. Returns the bind addr
/// and the leaf cert DER so callers can build clients with arbitrary ALPN.
/// The upstream is pointed at a bound-but-unresponsive UDP socket we own, so
/// any query that escapes to the upstream path times out deterministically
/// (SERVFAIL) regardless of what the host has running on port 53.
async fn spawn_dot_server() -> (SocketAddr, CertificateDer<'static>) {
    let (server_tls, cert_der) = test_tls_configs();

    let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
    // Bind an unresponsive upstream and leak it so it lives for the test duration.
    let blackhole = Box::leak(Box::new(std::net::UdpSocket::bind("127.0.0.1:0").unwrap()));
    let upstream_addr = blackhole.local_addr().unwrap();
    // Minimal ServerCtx: one local A record, everything else defaulted or
    // disabled. NOTE(review): field semantics assumed from names — confirm
    // against ctx.rs if the struct changes.
    let ctx = Arc::new(ServerCtx {
        socket,
        // Local zone with a single answerable name: dot-test.example → 10.0.0.1.
        zone_map: {
            let mut m = HashMap::new();
            let mut inner = HashMap::new();
            inner.insert(
                QueryType::A,
                vec![DnsRecord::A {
                    domain: "dot-test.example".to_string(),
                    addr: std::net::Ipv4Addr::new(10, 0, 0, 1),
                    ttl: 300,
                }],
            );
            m.insert("dot-test.example".to_string(), inner);
            m
        },
        cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
        stats: Mutex::new(crate::stats::ServerStats::new()),
        overrides: RwLock::new(crate::override_store::OverrideStore::new()),
        blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
        query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
        services: Mutex::new(crate::service_store::ServiceStore::new()),
        lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
        forwarding_rules: Vec::new(),
        // Blackhole upstream: forwarded queries time out after `timeout` below.
        upstream: Mutex::new(crate::forward::Upstream::Udp(upstream_addr)),
        upstream_auto: false,
        upstream_port: 53,
        lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
        // Short timeout keeps SERVFAIL tests fast.
        timeout: Duration::from_millis(200),
        proxy_tld: "numa".to_string(),
        proxy_tld_suffix: ".numa".to_string(),
        lan_enabled: false,
        config_path: String::new(),
        config_found: false,
        config_dir: std::path::PathBuf::from("/tmp"),
        data_dir: std::path::PathBuf::from("/tmp"),
        tls_config: Some(arc_swap::ArcSwap::from(server_tls)),
        upstream_mode: crate::config::UpstreamMode::Forward,
        root_hints: Vec::new(),
        srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
        inflight: Mutex::new(HashMap::new()),
        dnssec_enabled: false,
        dnssec_strict: false,
    });

    // OS-assigned port avoids collisions between parallel tests.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();

    let tls_config = Arc::clone(&*ctx.tls_config.as_ref().unwrap().load());
    let acceptor = TlsAcceptor::from(tls_config);

    tokio::spawn(accept_loop(listener, acceptor, ctx));

    (addr, cert_der)
}
|
||||
|
||||
/// Open a TLS connection to the DoT server and return the stream.
/// Uses SNI "numa.numa" to mirror what setup-phone's mobileconfig sends.
/// Panics on any connection/handshake failure (test helper).
async fn dot_connect(
    addr: SocketAddr,
    client_config: &Arc<rustls::ClientConfig>,
) -> tokio_rustls::client::TlsStream<tokio::net::TcpStream> {
    let connector = tokio_rustls::TlsConnector::from(Arc::clone(client_config));
    let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
    connector
        .connect(ServerName::try_from("numa.numa").unwrap(), tcp)
        .await
        .unwrap()
}
|
||||
|
||||
/// Send a DNS query over a DoT stream and read the response.
/// Implements the RFC 7858 framing by hand: 2-byte big-endian length
/// prefix on both the request and the response. Panics on I/O or parse
/// failure (test helper).
async fn dot_exchange(
    stream: &mut tokio_rustls::client::TlsStream<tokio::net::TcpStream>,
    query: &DnsPacket,
) -> DnsPacket {
    // Serialize the query packet to wire format.
    let mut buf = BytePacketBuffer::new();
    query.write(&mut buf).unwrap();
    let msg = buf.filled();

    // Frame: length prefix + payload in one write.
    let mut out = Vec::with_capacity(2 + msg.len());
    out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
    out.extend_from_slice(msg);
    stream.write_all(&out).await.unwrap();

    // Read the response length prefix, then exactly that many bytes.
    let mut len_buf = [0u8; 2];
    stream.read_exact(&mut len_buf).await.unwrap();
    let resp_len = u16::from_be_bytes(len_buf) as usize;

    let mut data = vec![0u8; resp_len];
    stream.read_exact(&mut data).await.unwrap();

    let mut resp_buf = BytePacketBuffer::from_bytes(&data);
    DnsPacket::from_buffer(&mut resp_buf).unwrap()
}
|
||||
|
||||
/// End-to-end: a DoT query for a name in the local zone map returns the
/// configured A record with matching id, rcode, address, and TTL.
#[tokio::test]
async fn dot_resolves_local_zone() {
    let (addr, cert_der) = spawn_dot_server().await;
    let client_config = dot_client(&cert_der, dot_alpn());
    let mut stream = dot_connect(addr, &client_config).await;

    let query = DnsPacket::query(0x1234, "dot-test.example", QueryType::A);
    let resp = dot_exchange(&mut stream, &query).await;

    assert_eq!(resp.header.id, 0x1234);
    assert!(resp.header.response);
    assert_eq!(resp.header.rescode, ResultCode::NOERROR);
    assert_eq!(resp.answers.len(), 1);
    // The answer must be exactly the record spawn_dot_server configured.
    match &resp.answers[0] {
        DnsRecord::A { domain, addr, ttl } => {
            assert_eq!(domain, "dot-test.example");
            assert_eq!(*addr, std::net::Ipv4Addr::new(10, 0, 0, 1));
            assert_eq!(*ttl, 300);
        }
        other => panic!("expected A record, got {:?}", other),
    }
}
|
||||
|
||||
/// RFC 7858 connection reuse: several queries over one TLS connection must
/// each get their own correct response (ids preserved per query).
#[tokio::test]
async fn dot_multiple_queries_on_persistent_connection() {
    let (addr, cert_der) = spawn_dot_server().await;
    let client_config = dot_client(&cert_der, dot_alpn());
    let mut stream = dot_connect(addr, &client_config).await;

    for i in 0..3u16 {
        let query = DnsPacket::query(0xA000 + i, "dot-test.example", QueryType::A);
        let resp = dot_exchange(&mut stream, &query).await;
        assert_eq!(resp.header.id, 0xA000 + i);
        assert_eq!(resp.header.rescode, ResultCode::NOERROR);
        assert_eq!(resp.answers.len(), 1);
    }
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_nxdomain_for_unknown() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let mut stream = dot_connect(addr, &client_config).await;
|
||||
|
||||
let query = DnsPacket::query(0xBEEF, "nonexistent.test", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
|
||||
assert_eq!(resp.header.id, 0xBEEF);
|
||||
assert!(resp.header.response);
|
||||
// Query goes to the blackhole upstream which never replies → SERVFAIL.
|
||||
// The SERVFAIL response echoes the question section.
|
||||
assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
|
||||
assert_eq!(resp.questions.len(), 1);
|
||||
assert_eq!(resp.questions[0].name, "nonexistent.test");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_negotiates_alpn() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let stream = dot_connect(addr, &client_config).await;
|
||||
let (_io, conn) = stream.get_ref();
|
||||
assert_eq!(conn.alpn_protocol(), Some(&b"dot"[..]));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_rejects_non_dot_alpn() {
|
||||
// Cross-protocol confusion defense: a client that only offers "h2"
|
||||
// (e.g. an HTTP/2 client mistakenly hitting :853) must not complete
|
||||
// a TLS handshake with the DoT server. Verifies the rustls server
|
||||
// sends `no_application_protocol` rather than silently negotiating.
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, vec![b"h2".to_vec()]);
|
||||
let connector = tokio_rustls::TlsConnector::from(client_config);
|
||||
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
let result = connector
|
||||
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||
.await;
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"DoT server must reject ALPN that doesn't include \"dot\""
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_concurrent_connections() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for i in 0..5u16 {
|
||||
let cfg = Arc::clone(&client_config);
|
||||
handles.push(tokio::spawn(async move {
|
||||
let mut stream = dot_connect(addr, &cfg).await;
|
||||
let query = DnsPacket::query(0xC000 + i, "dot-test.example", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
assert_eq!(resp.header.id, 0xC000 + i);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
}));
|
||||
}
|
||||
|
||||
for h in handles {
|
||||
h.await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -141,7 +141,7 @@ mod tests {
|
||||
use std::future::IntoFuture;
|
||||
|
||||
use crate::header::ResultCode;
|
||||
use crate::question::QueryType;
|
||||
use crate::question::{DnsQuestion, QueryType};
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
#[test]
|
||||
@@ -160,7 +160,12 @@ mod tests {
|
||||
}
|
||||
|
||||
fn make_query() -> DnsPacket {
|
||||
DnsPacket::query(0xABCD, "example.com", QueryType::A)
|
||||
let mut q = DnsPacket::new();
|
||||
q.header.id = 0xABCD;
|
||||
q.header.recursion_desired = true;
|
||||
q.questions
|
||||
.push(DnsQuestion::new("example.com".to_string(), QueryType::A));
|
||||
q
|
||||
}
|
||||
|
||||
fn make_response(query: &DnsPacket) -> DnsPacket {
|
||||
|
||||
73
src/lib.rs
73
src/lib.rs
@@ -5,7 +5,6 @@ pub mod cache;
|
||||
pub mod config;
|
||||
pub mod ctx;
|
||||
pub mod dnssec;
|
||||
pub mod dot;
|
||||
pub mod forward;
|
||||
pub mod header;
|
||||
pub mod lan;
|
||||
@@ -17,7 +16,6 @@ pub mod question;
|
||||
pub mod record;
|
||||
pub mod recursive;
|
||||
pub mod service_store;
|
||||
pub mod srtt;
|
||||
pub mod stats;
|
||||
pub mod system_dns;
|
||||
pub mod tls;
|
||||
@@ -26,10 +24,7 @@ pub type Error = Box<dyn std::error::Error + Send + Sync>;
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
/// Shared config directory for persistent data (services.json, etc).
|
||||
/// Unix users: ~/.config/numa/
|
||||
/// Linux root daemon: /var/lib/numa (FHS) — falls back to /usr/local/var/numa
|
||||
/// if a pre-v0.10.1 install already lives there.
|
||||
/// macOS root daemon: /usr/local/var/numa (Homebrew prefix)
|
||||
/// Unix: ~/.config/numa/ (or /usr/local/var/numa/ when running as root daemon)
|
||||
/// Windows: %APPDATA%\numa
|
||||
pub fn config_dir() -> std::path::PathBuf {
|
||||
#[cfg(windows)]
|
||||
@@ -66,15 +61,11 @@ fn config_dir_unix() -> std::path::PathBuf {
|
||||
}
|
||||
|
||||
// Running as root daemon (launchd/systemd) — use system-wide path
|
||||
daemon_data_dir()
|
||||
std::path::PathBuf::from("/usr/local/var/numa")
|
||||
}
|
||||
|
||||
/// Default system-wide data directory for TLS certs. Overridable via
|
||||
/// `[server] data_dir = "..."` in numa.toml — this function only provides
|
||||
/// the fallback when the config doesn't set it.
|
||||
/// Linux: /var/lib/numa (FHS) — falls back to /usr/local/var/numa if a
|
||||
/// pre-v0.10.1 install already has data there.
|
||||
/// macOS: /usr/local/var/numa (Homebrew prefix)
|
||||
/// System-wide data directory for TLS certs.
|
||||
/// Unix: /usr/local/var/numa
|
||||
/// Windows: %PROGRAMDATA%\numa
|
||||
pub fn data_dir() -> std::path::PathBuf {
|
||||
#[cfg(windows)]
|
||||
@@ -86,62 +77,6 @@ pub fn data_dir() -> std::path::PathBuf {
|
||||
}
|
||||
#[cfg(not(windows))]
|
||||
{
|
||||
daemon_data_dir()
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve the system-wide data directory for the running platform.
|
||||
/// Honors backwards compatibility with pre-v0.10.1 installs that still
|
||||
/// have their CA cert + services.json under `/usr/local/var/numa`.
|
||||
#[cfg(not(windows))]
|
||||
fn daemon_data_dir() -> std::path::PathBuf {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
std::path::PathBuf::from(resolve_linux_data_dir(
|
||||
std::path::Path::new("/usr/local/var/numa").exists(),
|
||||
std::path::Path::new("/var/lib/numa").exists(),
|
||||
))
|
||||
}
|
||||
#[cfg(target_os = "macos")]
|
||||
{
|
||||
// macOS uses the Homebrew prefix convention; no FHS migration needed.
|
||||
std::path::PathBuf::from("/usr/local/var/numa")
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracted as a pure function so the migration logic is unit-testable
|
||||
/// without touching the real filesystem.
|
||||
#[cfg(any(target_os = "linux", test))]
|
||||
fn resolve_linux_data_dir(legacy_exists: bool, fhs_exists: bool) -> &'static str {
|
||||
if legacy_exists && !fhs_exists {
|
||||
"/usr/local/var/numa"
|
||||
} else {
|
||||
"/var/lib/numa"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_fresh_install_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(false, false), "/var/lib/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_upgrading_install_keeps_legacy() {
|
||||
// Migration must keep legacy so the user doesn't lose their CA on upgrade.
|
||||
assert_eq!(resolve_linux_data_dir(true, false), "/usr/local/var/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_after_migration_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(true, true), "/var/lib/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_only_fhs_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(false, true), "/var/lib/numa");
|
||||
}
|
||||
}
|
||||
|
||||
197
src/main.rs
197
src/main.rs
@@ -17,12 +17,10 @@ use numa::query_log::QueryLog;
|
||||
use numa::service_store::ServiceStore;
|
||||
use numa::stats::ServerStats;
|
||||
use numa::system_dns::{
|
||||
discover_system_dns, install_service, restart_service, service_status, uninstall_service,
|
||||
discover_system_dns, install_service, install_system_dns, restart_service, service_status,
|
||||
uninstall_service, uninstall_system_dns,
|
||||
};
|
||||
|
||||
const QUAD9_IP: &str = "9.9.9.9";
|
||||
const DOH_FALLBACK: &str = "https://9.9.9.9/dns-query";
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> numa::Result<()> {
|
||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
|
||||
@@ -33,12 +31,12 @@ async fn main() -> numa::Result<()> {
|
||||
let arg1 = std::env::args().nth(1).unwrap_or_default();
|
||||
match arg1.as_str() {
|
||||
"install" => {
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — installing\n");
|
||||
return install_service().map_err(|e| e.into());
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — configuring system DNS\n");
|
||||
return install_system_dns().map_err(|e| e.into());
|
||||
}
|
||||
"uninstall" => {
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — uninstalling\n");
|
||||
return uninstall_service().map_err(|e| e.into());
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — restoring system DNS\n");
|
||||
return uninstall_system_dns().map_err(|e| e.into());
|
||||
}
|
||||
"service" => {
|
||||
let sub = std::env::args().nth(2).unwrap_or_default();
|
||||
@@ -109,81 +107,32 @@ async fn main() -> numa::Result<()> {
|
||||
// Discover system DNS in a single pass (upstream + forwarding rules)
|
||||
let system_dns = discover_system_dns();
|
||||
|
||||
let root_hints = numa::recursive::parse_root_hints(&config.upstream.root_hints);
|
||||
|
||||
let (resolved_mode, upstream_auto, upstream, upstream_label) = match config.upstream.mode {
|
||||
numa::config::UpstreamMode::Auto => {
|
||||
info!("auto mode: probing recursive resolution...");
|
||||
if numa::recursive::probe_recursive(&root_hints).await {
|
||||
info!("recursive probe succeeded — self-sovereign mode");
|
||||
let dummy = Upstream::Udp("0.0.0.0:0".parse().unwrap());
|
||||
(
|
||||
numa::config::UpstreamMode::Recursive,
|
||||
false,
|
||||
dummy,
|
||||
"recursive (root hints)".to_string(),
|
||||
)
|
||||
} else {
|
||||
log::warn!("recursive probe failed — falling back to Quad9 DoH");
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
let url = DOH_FALLBACK.to_string();
|
||||
let label = url.clone();
|
||||
(
|
||||
numa::config::UpstreamMode::Forward,
|
||||
false,
|
||||
Upstream::Doh { url, client },
|
||||
label,
|
||||
)
|
||||
}
|
||||
}
|
||||
numa::config::UpstreamMode::Recursive => {
|
||||
let dummy = Upstream::Udp("0.0.0.0:0".parse().unwrap());
|
||||
(
|
||||
numa::config::UpstreamMode::Recursive,
|
||||
false,
|
||||
dummy,
|
||||
"recursive (root hints)".to_string(),
|
||||
)
|
||||
}
|
||||
numa::config::UpstreamMode::Forward => {
|
||||
let upstream_addr = if config.upstream.address.is_empty() {
|
||||
system_dns
|
||||
.default_upstream
|
||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||
.unwrap_or_else(|| {
|
||||
info!("could not detect system DNS, falling back to Quad9 DoH");
|
||||
DOH_FALLBACK.to_string()
|
||||
})
|
||||
} else {
|
||||
config.upstream.address.clone()
|
||||
};
|
||||
|
||||
let upstream: Upstream = if upstream_addr.starts_with("https://") {
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
Upstream::Doh {
|
||||
url: upstream_addr,
|
||||
client,
|
||||
}
|
||||
} else {
|
||||
let addr: SocketAddr =
|
||||
format!("{}:{}", upstream_addr, config.upstream.port).parse()?;
|
||||
Upstream::Udp(addr)
|
||||
};
|
||||
let label = upstream.to_string();
|
||||
(
|
||||
numa::config::UpstreamMode::Forward,
|
||||
config.upstream.address.is_empty(),
|
||||
upstream,
|
||||
label,
|
||||
)
|
||||
}
|
||||
let upstream_addr = if config.upstream.address.is_empty() {
|
||||
system_dns
|
||||
.default_upstream
|
||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||
.unwrap_or_else(|| {
|
||||
info!("could not detect system DNS, falling back to Quad9 DoH");
|
||||
"https://dns.quad9.net/dns-query".to_string()
|
||||
})
|
||||
} else {
|
||||
config.upstream.address.clone()
|
||||
};
|
||||
|
||||
let upstream: Upstream = if upstream_addr.starts_with("https://") {
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
Upstream::Doh {
|
||||
url: upstream_addr,
|
||||
client,
|
||||
}
|
||||
} else {
|
||||
let addr: SocketAddr = format!("{}:{}", upstream_addr, config.upstream.port).parse()?;
|
||||
Upstream::Udp(addr)
|
||||
};
|
||||
let upstream_label = upstream.to_string();
|
||||
let api_port = config.server.api_port;
|
||||
|
||||
let mut blocklist = BlocklistStore::new();
|
||||
@@ -204,23 +153,10 @@ async fn main() -> numa::Result<()> {
|
||||
|
||||
let forwarding_rules = system_dns.forwarding_rules;
|
||||
|
||||
// Resolve data_dir from config, falling back to the platform default.
|
||||
// Used for TLS CA storage below and stored on ServerCtx for runtime use.
|
||||
let resolved_data_dir = config
|
||||
.server
|
||||
.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(numa::data_dir);
|
||||
|
||||
// Build initial TLS config before ServerCtx (so ArcSwap is ready at construction)
|
||||
let initial_tls = if config.proxy.enabled && config.proxy.tls_port > 0 {
|
||||
let service_names = service_store.names();
|
||||
match numa::tls::build_tls_config(
|
||||
&config.proxy.tld,
|
||||
&service_names,
|
||||
Vec::new(),
|
||||
&resolved_data_dir,
|
||||
) {
|
||||
match numa::tls::build_tls_config(&config.proxy.tld, &service_names) {
|
||||
Ok(tls_config) => Some(ArcSwap::from(tls_config)),
|
||||
Err(e) => {
|
||||
log::warn!("TLS setup failed, HTTPS proxy disabled: {}", e);
|
||||
@@ -247,7 +183,7 @@ async fn main() -> numa::Result<()> {
|
||||
lan_peers: Mutex::new(numa::lan::PeerStore::new(config.lan.peer_timeout_secs)),
|
||||
forwarding_rules,
|
||||
upstream: Mutex::new(upstream),
|
||||
upstream_auto,
|
||||
upstream_auto: config.upstream.address.is_empty(),
|
||||
upstream_port: config.upstream.port,
|
||||
lan_ip: Mutex::new(numa::lan::detect_lan_ip().unwrap_or(std::net::Ipv4Addr::LOCALHOST)),
|
||||
timeout: Duration::from_millis(config.upstream.timeout_ms),
|
||||
@@ -261,17 +197,16 @@ async fn main() -> numa::Result<()> {
|
||||
config_path: resolved_config_path,
|
||||
config_found,
|
||||
config_dir: numa::config_dir(),
|
||||
data_dir: resolved_data_dir,
|
||||
data_dir: numa::data_dir(),
|
||||
tls_config: initial_tls,
|
||||
upstream_mode: resolved_mode,
|
||||
root_hints,
|
||||
srtt: std::sync::RwLock::new(numa::srtt::SrttCache::new(config.upstream.srtt)),
|
||||
inflight: std::sync::Mutex::new(std::collections::HashMap::new()),
|
||||
upstream_mode: config.upstream.mode,
|
||||
root_hints: numa::recursive::parse_root_hints(&config.upstream.root_hints),
|
||||
dnssec_enabled: config.dnssec.enabled,
|
||||
dnssec_strict: config.dnssec.strict,
|
||||
});
|
||||
|
||||
let zone_count: usize = ctx.zone_map.values().map(|m| m.len()).sum();
|
||||
|
||||
// Build banner rows, then size the box to fit the longest value
|
||||
let api_url = format!("http://localhost:{}", api_port);
|
||||
let proxy_label = if config.proxy.enabled {
|
||||
@@ -371,20 +306,6 @@ async fn main() -> numa::Result<()> {
|
||||
);
|
||||
if let Some(ref label) = proxy_label {
|
||||
row("Proxy", g, label);
|
||||
if config.proxy.bind_addr == "127.0.0.1" {
|
||||
let y = "\x1b[38;2;204;176;59m"; // yellow
|
||||
row(
|
||||
"",
|
||||
y,
|
||||
&format!(
|
||||
"⚠ proxy on 127.0.0.1 — .{} not LAN reachable",
|
||||
config.proxy.tld
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
if config.dot.enabled {
|
||||
row("DoT", g, &format!("tls://:{}", config.dot.port));
|
||||
}
|
||||
if config.lan.enabled {
|
||||
row("LAN", g, "mDNS (_numa._tcp.local)");
|
||||
@@ -432,13 +353,8 @@ async fn main() -> numa::Result<()> {
|
||||
let prime_ctx = Arc::clone(&ctx);
|
||||
let prime_tlds = config.upstream.prime_tlds;
|
||||
tokio::spawn(async move {
|
||||
numa::recursive::prime_tld_cache(
|
||||
&prime_ctx.cache,
|
||||
&prime_ctx.root_hints,
|
||||
&prime_tlds,
|
||||
&prime_ctx.srtt,
|
||||
)
|
||||
.await;
|
||||
numa::recursive::prime_tld_cache(&prime_ctx.cache, &prime_ctx.root_hints, &prime_tlds)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -452,11 +368,16 @@ async fn main() -> numa::Result<()> {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
|
||||
let proxy_bind: std::net::Ipv4Addr = config
|
||||
.proxy
|
||||
.bind_addr
|
||||
.parse()
|
||||
.unwrap_or(std::net::Ipv4Addr::LOCALHOST);
|
||||
// Proxy binds 0.0.0.0 when LAN is enabled (cross-machine access), otherwise config value
|
||||
let proxy_bind: std::net::Ipv4Addr = if config.lan.enabled {
|
||||
std::net::Ipv4Addr::UNSPECIFIED
|
||||
} else {
|
||||
config
|
||||
.proxy
|
||||
.bind_addr
|
||||
.parse()
|
||||
.unwrap_or(std::net::Ipv4Addr::LOCALHOST)
|
||||
};
|
||||
|
||||
// Spawn HTTP reverse proxy for .numa domains
|
||||
if config.proxy.enabled {
|
||||
@@ -493,27 +414,11 @@ async fn main() -> numa::Result<()> {
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn DNS-over-TLS listener (RFC 7858)
|
||||
if config.dot.enabled {
|
||||
let dot_ctx = Arc::clone(&ctx);
|
||||
let dot_config = config.dot.clone();
|
||||
tokio::spawn(async move {
|
||||
numa::dot::start_dot(dot_ctx, &dot_config).await;
|
||||
});
|
||||
}
|
||||
|
||||
// UDP DNS listener
|
||||
#[allow(clippy::infinite_loop)]
|
||||
loop {
|
||||
let mut buffer = BytePacketBuffer::new();
|
||||
let (_, src_addr) = match ctx.socket.recv_from(&mut buffer.buf).await {
|
||||
Ok(r) => r,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::ConnectionReset => {
|
||||
// Windows delivers ICMP port-unreachable as ConnectionReset on UDP sockets
|
||||
continue;
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
let (_, src_addr) = ctx.socket.recv_from(&mut buffer.buf).await?;
|
||||
|
||||
let ctx = Arc::clone(&ctx);
|
||||
tokio::spawn(async move {
|
||||
@@ -556,7 +461,7 @@ async fn network_watch_loop(ctx: Arc<numa::ctx::ServerCtx>) {
|
||||
let new_addr = dns_info
|
||||
.default_upstream
|
||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||
.unwrap_or_else(|| QUAD9_IP.to_string());
|
||||
.unwrap_or_else(|| "9.9.9.9".to_string());
|
||||
if let Ok(new_sock) =
|
||||
format!("{}:{}", new_addr, ctx.upstream_port).parse::<SocketAddr>()
|
||||
{
|
||||
|
||||
@@ -117,22 +117,6 @@ impl OverrideStore {
|
||||
self.entries.clear();
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let per_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<String>()
|
||||
+ std::mem::size_of::<OverrideEntry>()
|
||||
+ 1;
|
||||
let table = self.entries.capacity() * per_slot;
|
||||
let heap: usize = self
|
||||
.entries
|
||||
.iter()
|
||||
.map(|(k, v)| {
|
||||
k.capacity() + v.domain.capacity() + v.target.capacity() + v.record.heap_bytes()
|
||||
})
|
||||
.sum();
|
||||
table + heap
|
||||
}
|
||||
|
||||
pub fn active_count(&self) -> usize {
|
||||
self.entries.values().filter(|e| !e.is_expired()).count()
|
||||
}
|
||||
@@ -170,16 +154,3 @@ fn parse_target(domain: &str, target: &str, ttl: u32) -> Result<(QueryType, DnsR
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut store = OverrideStore::new();
|
||||
let empty = store.heap_bytes();
|
||||
store.insert("example.com", "1.2.3.4", 300, None).unwrap();
|
||||
assert!(store.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,34 +57,6 @@ impl DnsPacket {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn query(id: u16, domain: &str, qtype: crate::question::QueryType) -> DnsPacket {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = id;
|
||||
pkt.header.recursion_desired = true;
|
||||
pkt.questions
|
||||
.push(crate::question::DnsQuestion::new(domain.to_string(), qtype));
|
||||
pkt
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
fn records_heap(records: &[DnsRecord]) -> usize {
|
||||
records
|
||||
.iter()
|
||||
.map(|r| std::mem::size_of::<DnsRecord>() + r.heap_bytes())
|
||||
.sum::<usize>()
|
||||
}
|
||||
let questions: usize = self
|
||||
.questions
|
||||
.iter()
|
||||
.map(|q| std::mem::size_of::<DnsQuestion>() + q.name.capacity())
|
||||
.sum();
|
||||
questions
|
||||
+ records_heap(&self.answers)
|
||||
+ records_heap(&self.authorities)
|
||||
+ records_heap(&self.resources)
|
||||
+ self.edns.as_ref().map_or(0, |e| e.options.capacity())
|
||||
}
|
||||
|
||||
pub fn response_from(query: &DnsPacket, rescode: crate::header::ResultCode) -> DnsPacket {
|
||||
let mut resp = DnsPacket::new();
|
||||
resp.header.id = query.header.id;
|
||||
@@ -610,16 +582,4 @@ mod tests {
|
||||
panic!("expected DNSKEY");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_accounts_for_records() {
|
||||
let mut pkt = DnsPacket::new();
|
||||
let empty = pkt.heap_bytes();
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "1.2.3.4".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
assert!(pkt.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,21 +38,6 @@ impl QueryLog {
|
||||
self.entries.push_back(entry);
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
self.entries
|
||||
.iter()
|
||||
.map(|e| std::mem::size_of::<QueryLogEntry>() + e.domain.capacity())
|
||||
.sum()
|
||||
}
|
||||
|
||||
pub fn query(&self, filter: &QueryLogFilter) -> Vec<&QueryLogEntry> {
|
||||
self.entries
|
||||
.iter()
|
||||
@@ -92,25 +77,3 @@ pub struct QueryLogFilter {
|
||||
pub since: Option<SystemTime>,
|
||||
pub limit: Option<usize>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut log = QueryLog::new(100);
|
||||
let empty = log.heap_bytes();
|
||||
log.push(QueryLogEntry {
|
||||
timestamp: SystemTime::now(),
|
||||
src_addr: "127.0.0.1:1234".parse().unwrap(),
|
||||
domain: "example.com".into(),
|
||||
query_type: QueryType::A,
|
||||
path: QueryPath::Forwarded,
|
||||
rescode: ResultCode::NOERROR,
|
||||
latency_us: 500,
|
||||
dnssec: DnssecStatus::Indeterminate,
|
||||
});
|
||||
assert!(log.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,46 +136,6 @@ impl DnsRecord {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
match self {
|
||||
DnsRecord::A { domain, .. } => domain.capacity(),
|
||||
DnsRecord::NS { domain, host, .. } | DnsRecord::CNAME { domain, host, .. } => {
|
||||
domain.capacity() + host.capacity()
|
||||
}
|
||||
DnsRecord::MX { domain, host, .. } => domain.capacity() + host.capacity(),
|
||||
DnsRecord::AAAA { domain, .. } => domain.capacity(),
|
||||
DnsRecord::DNSKEY {
|
||||
domain, public_key, ..
|
||||
} => domain.capacity() + public_key.capacity(),
|
||||
DnsRecord::DS { domain, digest, .. } => domain.capacity() + digest.capacity(),
|
||||
DnsRecord::RRSIG {
|
||||
domain,
|
||||
signer_name,
|
||||
signature,
|
||||
..
|
||||
} => domain.capacity() + signer_name.capacity() + signature.capacity(),
|
||||
DnsRecord::NSEC {
|
||||
domain,
|
||||
next_domain,
|
||||
type_bitmap,
|
||||
..
|
||||
} => domain.capacity() + next_domain.capacity() + type_bitmap.capacity(),
|
||||
DnsRecord::NSEC3 {
|
||||
domain,
|
||||
salt,
|
||||
next_hashed_owner,
|
||||
type_bitmap,
|
||||
..
|
||||
} => {
|
||||
domain.capacity()
|
||||
+ salt.capacity()
|
||||
+ next_hashed_owner.capacity()
|
||||
+ type_bitmap.capacity()
|
||||
}
|
||||
DnsRecord::UNKNOWN { domain, data, .. } => domain.capacity() + data.capacity(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_ttl(&mut self, new_ttl: u32) {
|
||||
match self {
|
||||
DnsRecord::A { ttl, .. }
|
||||
@@ -690,14 +650,4 @@ mod tests {
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_reflects_string_capacity() {
|
||||
let rec = DnsRecord::CNAME {
|
||||
domain: "a]".repeat(100),
|
||||
host: "b".repeat(200),
|
||||
ttl: 60,
|
||||
};
|
||||
assert!(rec.heap_bytes() >= 300);
|
||||
}
|
||||
}
|
||||
|
||||
211
src/recursive.rs
211
src/recursive.rs
@@ -1,7 +1,7 @@
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
use std::sync::RwLock;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::time::Duration;
|
||||
|
||||
use log::{debug, info};
|
||||
|
||||
@@ -9,9 +9,8 @@ use crate::cache::DnsCache;
|
||||
use crate::forward::forward_udp;
|
||||
use crate::header::ResultCode;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::QueryType;
|
||||
use crate::question::{DnsQuestion, QueryType};
|
||||
use crate::record::DnsRecord;
|
||||
use crate::srtt::SrttCache;
|
||||
|
||||
const MAX_REFERRAL_DEPTH: u8 = 10;
|
||||
const MAX_CNAME_DEPTH: u8 = 8;
|
||||
@@ -21,8 +20,7 @@ const UDP_FAIL_THRESHOLD: u8 = 3;
|
||||
|
||||
static QUERY_ID: AtomicU16 = AtomicU16::new(1);
|
||||
static UDP_FAILURES: std::sync::atomic::AtomicU8 = std::sync::atomic::AtomicU8::new(0);
|
||||
pub(crate) static UDP_DISABLED: std::sync::atomic::AtomicBool =
|
||||
std::sync::atomic::AtomicBool::new(false);
|
||||
static UDP_DISABLED: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
|
||||
|
||||
fn next_id() -> u16 {
|
||||
QUERY_ID.fetch_add(1, Ordering::Relaxed)
|
||||
@@ -32,14 +30,6 @@ fn dns_addr(ip: impl Into<IpAddr>) -> SocketAddr {
|
||||
SocketAddr::new(ip.into(), 53)
|
||||
}
|
||||
|
||||
fn record_to_addr(rec: &DnsRecord) -> Option<SocketAddr> {
|
||||
match rec {
|
||||
DnsRecord::A { addr, .. } => Some(dns_addr(*addr)),
|
||||
DnsRecord::AAAA { addr, .. } => Some(dns_addr(*addr)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reset_udp_state() {
|
||||
UDP_DISABLED.store(false, Ordering::Release);
|
||||
UDP_FAILURES.store(0, Ordering::Release);
|
||||
@@ -54,8 +44,11 @@ pub async fn probe_udp(root_hints: &[SocketAddr]) {
|
||||
Some(h) => *h,
|
||||
None => return,
|
||||
};
|
||||
let mut probe = DnsPacket::query(next_id(), ".", QueryType::NS);
|
||||
probe.header.recursion_desired = false;
|
||||
let mut probe = DnsPacket::new();
|
||||
probe.header.id = next_id();
|
||||
probe
|
||||
.questions
|
||||
.push(DnsQuestion::new(".".to_string(), QueryType::NS));
|
||||
if forward_udp(&probe, hint, Duration::from_millis(1500))
|
||||
.await
|
||||
.is_ok()
|
||||
@@ -65,27 +58,7 @@ pub async fn probe_udp(root_hints: &[SocketAddr]) {
|
||||
}
|
||||
}
|
||||
|
||||
/// Probe whether recursive resolution works by querying root servers.
|
||||
/// Tries up to 3 hints before declaring failure.
|
||||
pub async fn probe_recursive(root_hints: &[SocketAddr]) -> bool {
|
||||
let mut probe = DnsPacket::query(next_id(), ".", QueryType::NS);
|
||||
probe.header.recursion_desired = false;
|
||||
for hint in root_hints.iter().take(3) {
|
||||
if let Ok(resp) = forward_udp(&probe, *hint, Duration::from_secs(3)).await {
|
||||
if !resp.answers.is_empty() || !resp.authorities.is_empty() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub async fn prime_tld_cache(
|
||||
cache: &RwLock<DnsCache>,
|
||||
root_hints: &[SocketAddr],
|
||||
tlds: &[String],
|
||||
srtt: &RwLock<SrttCache>,
|
||||
) {
|
||||
pub async fn prime_tld_cache(cache: &RwLock<DnsCache>, root_hints: &[SocketAddr], tlds: &[String]) {
|
||||
if root_hints.is_empty() || tlds.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -93,7 +66,7 @@ pub async fn prime_tld_cache(
|
||||
let mut root_addr = root_hints[0];
|
||||
for hint in root_hints {
|
||||
info!("prime: probing root {}", hint);
|
||||
match send_query(".", QueryType::NS, *hint, srtt).await {
|
||||
match send_query(".", QueryType::NS, *hint).await {
|
||||
Ok(_) => {
|
||||
info!("prime: root {} reachable", hint);
|
||||
root_addr = *hint;
|
||||
@@ -106,7 +79,7 @@ pub async fn prime_tld_cache(
|
||||
}
|
||||
|
||||
// Fetch root DNSKEY (needed for DNSSEC chain-of-trust terminus)
|
||||
if let Ok(root_dnskey) = send_query(".", QueryType::DNSKEY, root_addr, srtt).await {
|
||||
if let Ok(root_dnskey) = send_query(".", QueryType::DNSKEY, root_addr).await {
|
||||
cache
|
||||
.write()
|
||||
.unwrap()
|
||||
@@ -118,7 +91,7 @@ pub async fn prime_tld_cache(
|
||||
|
||||
for tld in tlds {
|
||||
// Fetch NS referral (includes DS in authority section from root)
|
||||
let response = match send_query(tld, QueryType::NS, root_addr, srtt).await {
|
||||
let response = match send_query(tld, QueryType::NS, root_addr).await {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
debug!("prime: failed to query NS for .{}: {}", tld, e);
|
||||
@@ -135,6 +108,7 @@ pub async fn prime_tld_cache(
|
||||
let mut cache_w = cache.write().unwrap();
|
||||
cache_w.insert(tld, QueryType::NS, &response);
|
||||
cache_glue(&mut cache_w, &response, &ns_names);
|
||||
// Cache DS records from referral authority section
|
||||
cache_ds_from_authority(&mut cache_w, &response);
|
||||
}
|
||||
|
||||
@@ -142,7 +116,7 @@ pub async fn prime_tld_cache(
|
||||
let first_ns_name = ns_names.first().map(|s| s.as_str()).unwrap_or("");
|
||||
let first_ns = glue_addrs_for(&response, first_ns_name);
|
||||
if let Some(ns_addr) = first_ns.first() {
|
||||
if let Ok(dnskey_resp) = send_query(tld, QueryType::DNSKEY, *ns_addr, srtt).await {
|
||||
if let Ok(dnskey_resp) = send_query(tld, QueryType::DNSKEY, *ns_addr).await {
|
||||
cache
|
||||
.write()
|
||||
.unwrap()
|
||||
@@ -166,11 +140,10 @@ pub async fn resolve_recursive(
|
||||
cache: &RwLock<DnsCache>,
|
||||
original_query: &DnsPacket,
|
||||
root_hints: &[SocketAddr],
|
||||
srtt: &RwLock<SrttCache>,
|
||||
) -> crate::Result<DnsPacket> {
|
||||
// No overall timeout — each hop is bounded by NS_QUERY_TIMEOUT (UDP + TCP fallback),
|
||||
// and MAX_REFERRAL_DEPTH caps the chain length.
|
||||
let mut resp = resolve_iterative(qname, qtype, cache, root_hints, srtt, 0, 0).await?;
|
||||
let mut resp = resolve_iterative(qname, qtype, cache, root_hints, 0, 0).await?;
|
||||
|
||||
resp.header.id = original_query.header.id;
|
||||
resp.header.recursion_available = true;
|
||||
@@ -184,7 +157,6 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
qtype: QueryType,
|
||||
cache: &'a RwLock<DnsCache>,
|
||||
root_hints: &'a [SocketAddr],
|
||||
srtt: &'a RwLock<SrttCache>,
|
||||
referral_depth: u8,
|
||||
cname_depth: u8,
|
||||
) -> std::pin::Pin<Box<dyn std::future::Future<Output = crate::Result<DnsPacket>> + Send + 'a>> {
|
||||
@@ -198,7 +170,6 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
}
|
||||
|
||||
let (mut current_zone, mut ns_addrs) = find_closest_ns(qname, cache, root_hints);
|
||||
srtt.read().unwrap().sort_by_rtt(&mut ns_addrs);
|
||||
let mut ns_idx = 0;
|
||||
|
||||
for _ in 0..MAX_REFERRAL_DEPTH {
|
||||
@@ -214,7 +185,7 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
ns_addr, q_type, q_name, current_zone, referral_depth
|
||||
);
|
||||
|
||||
let response = match send_query(q_name, q_type, ns_addr, srtt).await {
|
||||
let response = match send_query(q_name, q_type, ns_addr).await {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
debug!("recursive: NS {} failed: {}", ns_addr, e);
|
||||
@@ -223,6 +194,7 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
}
|
||||
};
|
||||
|
||||
// Minimized query response — treat as referral, not final answer
|
||||
if (q_type != qtype || !q_name.eq_ignore_ascii_case(qname))
|
||||
&& (!response.authorities.is_empty() || !response.answers.is_empty())
|
||||
{
|
||||
@@ -233,9 +205,8 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
if all_ns.is_empty() {
|
||||
all_ns = extract_ns_names(&response);
|
||||
}
|
||||
let mut new_addrs = resolve_ns_addrs_from_glue(&response, &all_ns, cache);
|
||||
let new_addrs = resolve_ns_addrs_from_glue(&response, &all_ns, cache);
|
||||
if !new_addrs.is_empty() {
|
||||
srtt.read().unwrap().sort_by_rtt(&mut new_addrs);
|
||||
ns_addrs = new_addrs;
|
||||
ns_idx = 0;
|
||||
continue;
|
||||
@@ -262,7 +233,6 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
qtype,
|
||||
cache,
|
||||
root_hints,
|
||||
srtt,
|
||||
0,
|
||||
cname_depth + 1,
|
||||
)
|
||||
@@ -286,6 +256,8 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
// Referral — extract NS + glue, cache glue, resolve NS addresses
|
||||
// Update zone for query minimization
|
||||
if let Some(zone) = referral_zone(&response) {
|
||||
current_zone = zone;
|
||||
}
|
||||
@@ -304,20 +276,29 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
for ns_name in &ns_names {
|
||||
if referral_depth < MAX_REFERRAL_DEPTH {
|
||||
debug!("recursive: resolving glue-less NS {}", ns_name);
|
||||
// Try A first, then AAAA
|
||||
for qt in [QueryType::A, QueryType::AAAA] {
|
||||
if let Ok(ns_resp) = resolve_iterative(
|
||||
ns_name,
|
||||
qt,
|
||||
cache,
|
||||
root_hints,
|
||||
srtt,
|
||||
referral_depth + 1,
|
||||
cname_depth,
|
||||
)
|
||||
.await
|
||||
{
|
||||
new_ns_addrs
|
||||
.extend(ns_resp.answers.iter().filter_map(record_to_addr));
|
||||
for rec in &ns_resp.answers {
|
||||
match rec {
|
||||
DnsRecord::A { addr, .. } => {
|
||||
new_ns_addrs.push(dns_addr(*addr));
|
||||
}
|
||||
DnsRecord::AAAA { addr, .. } => {
|
||||
new_ns_addrs.push(dns_addr(*addr));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !new_ns_addrs.is_empty() {
|
||||
break;
|
||||
@@ -335,7 +316,6 @@ pub(crate) fn resolve_iterative<'a>(
|
||||
return Err(format!("could not resolve any NS for {}", qname).into());
|
||||
}
|
||||
|
||||
srtt.read().unwrap().sort_by_rtt(&mut new_ns_addrs);
|
||||
ns_addrs = new_ns_addrs;
|
||||
ns_idx = 0;
|
||||
}
|
||||
@@ -371,7 +351,13 @@ fn find_closest_ns(
|
||||
if let DnsRecord::NS { host, .. } = ns_rec {
|
||||
for qt in [QueryType::A, QueryType::AAAA] {
|
||||
if let Some(resp) = guard.lookup(host, qt) {
|
||||
addrs.extend(resp.answers.iter().filter_map(record_to_addr));
|
||||
for rec in &resp.answers {
|
||||
match rec {
|
||||
DnsRecord::A { addr, .. } => addrs.push(dns_addr(*addr)),
|
||||
DnsRecord::AAAA { addr, .. } => addrs.push(dns_addr(*addr)),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -457,7 +443,13 @@ fn addrs_from_cache(cache: &RwLock<DnsCache>, name: &str) -> Vec<SocketAddr> {
|
||||
let mut addrs = Vec::new();
|
||||
for qt in [QueryType::A, QueryType::AAAA] {
|
||||
if let Some(pkt) = guard.lookup(name, qt) {
|
||||
addrs.extend(pkt.answers.iter().filter_map(record_to_addr));
|
||||
for rec in &pkt.answers {
|
||||
match rec {
|
||||
DnsRecord::A { addr, .. } => addrs.push(dns_addr(*addr)),
|
||||
DnsRecord::AAAA { addr, .. } => addrs.push(dns_addr(*addr)),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
addrs
|
||||
@@ -467,13 +459,15 @@ fn glue_addrs_for(response: &DnsPacket, ns_name: &str) -> Vec<SocketAddr> {
|
||||
response
|
||||
.resources
|
||||
.iter()
|
||||
.filter(|r| match r {
|
||||
DnsRecord::A { domain, .. } | DnsRecord::AAAA { domain, .. } => {
|
||||
domain.eq_ignore_ascii_case(ns_name)
|
||||
.filter_map(|r| match r {
|
||||
DnsRecord::A { domain, addr, .. } if domain.eq_ignore_ascii_case(ns_name) => {
|
||||
Some(dns_addr(*addr))
|
||||
}
|
||||
_ => false,
|
||||
DnsRecord::AAAA { domain, addr, .. } if domain.eq_ignore_ascii_case(ns_name) => {
|
||||
Some(dns_addr(*addr))
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
.filter_map(record_to_addr)
|
||||
.collect()
|
||||
}
|
||||
|
||||
@@ -567,63 +561,36 @@ fn make_glue_packet() -> DnsPacket {
|
||||
pkt
|
||||
}
|
||||
|
||||
async fn tcp_with_srtt(
|
||||
query: &DnsPacket,
|
||||
server: SocketAddr,
|
||||
srtt: &RwLock<SrttCache>,
|
||||
start: Instant,
|
||||
) -> crate::Result<DnsPacket> {
|
||||
match crate::forward::forward_tcp(query, server, TCP_TIMEOUT).await {
|
||||
Ok(resp) => {
|
||||
srtt.write()
|
||||
.unwrap()
|
||||
.record_rtt(server.ip(), start.elapsed().as_millis() as u64, true);
|
||||
Ok(resp)
|
||||
}
|
||||
Err(e) => {
|
||||
srtt.write().unwrap().record_failure(server.ip());
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_query(
|
||||
qname: &str,
|
||||
qtype: QueryType,
|
||||
server: SocketAddr,
|
||||
srtt: &RwLock<SrttCache>,
|
||||
) -> crate::Result<DnsPacket> {
|
||||
let mut query = DnsPacket::query(next_id(), qname, qtype);
|
||||
async fn send_query(qname: &str, qtype: QueryType, server: SocketAddr) -> crate::Result<DnsPacket> {
|
||||
let mut query = DnsPacket::new();
|
||||
query.header.id = next_id();
|
||||
query.header.recursion_desired = false;
|
||||
query
|
||||
.questions
|
||||
.push(DnsQuestion::new(qname.to_string(), qtype));
|
||||
query.edns = Some(crate::packet::EdnsOpt {
|
||||
do_bit: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
// IPv6 forced to TCP — our UDP socket is bound to 0.0.0.0
|
||||
// Skip IPv6 if the socket can't handle it (bound to 0.0.0.0)
|
||||
if server.is_ipv6() {
|
||||
return tcp_with_srtt(&query, server, srtt, start).await;
|
||||
return crate::forward::forward_tcp(&query, server, TCP_TIMEOUT).await;
|
||||
}
|
||||
|
||||
// UDP detected as blocked — go TCP-first
|
||||
// If UDP has been detected as blocked, go TCP-first
|
||||
if UDP_DISABLED.load(Ordering::Acquire) {
|
||||
return tcp_with_srtt(&query, server, srtt, start).await;
|
||||
return crate::forward::forward_tcp(&query, server, TCP_TIMEOUT).await;
|
||||
}
|
||||
|
||||
match forward_udp(&query, server, NS_QUERY_TIMEOUT).await {
|
||||
Ok(resp) if resp.header.truncated_message => {
|
||||
debug!("send_query: truncated from {}, retrying TCP", server);
|
||||
tcp_with_srtt(&query, server, srtt, start).await
|
||||
crate::forward::forward_tcp(&query, server, TCP_TIMEOUT).await
|
||||
}
|
||||
Ok(resp) => {
|
||||
// UDP works — reset failure counter
|
||||
UDP_FAILURES.store(0, Ordering::Release);
|
||||
srtt.write().unwrap().record_rtt(
|
||||
server.ip(),
|
||||
start.elapsed().as_millis() as u64,
|
||||
false,
|
||||
);
|
||||
Ok(resp)
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -636,7 +603,7 @@ async fn send_query(
|
||||
);
|
||||
}
|
||||
debug!("send_query: UDP failed for {}: {}, trying TCP", server, e);
|
||||
tcp_with_srtt(&query, server, srtt, start).await
|
||||
crate::forward::forward_tcp(&query, server, TCP_TIMEOUT).await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -870,25 +837,14 @@ mod tests {
|
||||
};
|
||||
let handler = handler.clone();
|
||||
tokio::spawn(async move {
|
||||
let timeout = std::time::Duration::from_secs(5);
|
||||
// Read length-prefixed DNS query
|
||||
let mut len_buf = [0u8; 2];
|
||||
if tokio::time::timeout(timeout, stream.read_exact(&mut len_buf))
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|r| r.ok())
|
||||
.is_none()
|
||||
{
|
||||
if stream.read_exact(&mut len_buf).await.is_err() {
|
||||
return;
|
||||
}
|
||||
let len = u16::from_be_bytes(len_buf) as usize;
|
||||
let mut data = vec![0u8; len];
|
||||
if tokio::time::timeout(timeout, stream.read_exact(&mut data))
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|r| r.ok())
|
||||
.is_none()
|
||||
{
|
||||
if stream.read_exact(&mut data).await.is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -938,8 +894,7 @@ mod tests {
|
||||
})
|
||||
.await;
|
||||
|
||||
let srtt = RwLock::new(SrttCache::new(true));
|
||||
let result = send_query("test.example.com", QueryType::A, server_addr, &srtt).await;
|
||||
let result = send_query("test.example.com", QueryType::A, server_addr).await;
|
||||
|
||||
let resp = result.expect("should resolve via TCP fallback");
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
@@ -990,8 +945,7 @@ mod tests {
|
||||
})
|
||||
.await;
|
||||
|
||||
let srtt = RwLock::new(SrttCache::new(true));
|
||||
let result = send_query("hello.example.com", QueryType::A, server_addr, &srtt).await;
|
||||
let result = send_query("hello.example.com", QueryType::A, server_addr).await;
|
||||
let resp = result.expect("TCP-only send_query should work");
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
@@ -1013,19 +967,10 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let cache = RwLock::new(DnsCache::new(100, 60, 86400));
|
||||
let srtt = RwLock::new(SrttCache::new(true));
|
||||
let root_hints = vec![server_addr];
|
||||
|
||||
let result = resolve_iterative(
|
||||
"nonexistent.test",
|
||||
QueryType::A,
|
||||
&cache,
|
||||
&root_hints,
|
||||
&srtt,
|
||||
0,
|
||||
0,
|
||||
)
|
||||
.await;
|
||||
let result =
|
||||
resolve_iterative("nonexistent.test", QueryType::A, &cache, &root_hints, 0, 0).await;
|
||||
|
||||
let resp = result.expect("NXDOMAIN should still return a response");
|
||||
assert_eq!(resp.header.rescode, ResultCode::NXDOMAIN);
|
||||
@@ -1060,7 +1005,11 @@ mod tests {
|
||||
})
|
||||
.await;
|
||||
|
||||
let query = DnsPacket::query(0xBEEF, "test.com", QueryType::A);
|
||||
let mut query = DnsPacket::new();
|
||||
query.header.id = 0xBEEF;
|
||||
query
|
||||
.questions
|
||||
.push(DnsQuestion::new("test.com".to_string(), QueryType::A));
|
||||
|
||||
let resp = crate::forward::forward_tcp(&query, server_addr, Duration::from_secs(2))
|
||||
.await
|
||||
@@ -1120,7 +1069,11 @@ mod tests {
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let query = DnsPacket::query(0xCAFE, "strict.test", QueryType::A);
|
||||
let mut query = DnsPacket::new();
|
||||
query.header.id = 0xCAFE;
|
||||
query
|
||||
.questions
|
||||
.push(DnsQuestion::new("strict.test".to_string(), QueryType::A));
|
||||
|
||||
let resp = crate::forward::forward_tcp(&query, addr, Duration::from_secs(2))
|
||||
.await
|
||||
|
||||
318
src/srtt.rs
318
src/srtt.rs
@@ -1,318 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::time::Instant;
|
||||
|
||||
const INITIAL_SRTT_MS: u64 = 200;
|
||||
const FAILURE_PENALTY_MS: u64 = 5000;
|
||||
const TCP_PENALTY_MS: u64 = 100;
|
||||
const DECAY_AFTER_SECS: u64 = 300;
|
||||
const MAX_ENTRIES: usize = 4096;
|
||||
const EVICT_BATCH: usize = 64;
|
||||
|
||||
struct SrttEntry {
|
||||
srtt_ms: u64,
|
||||
updated_at: Instant,
|
||||
}
|
||||
|
||||
pub struct SrttCache {
|
||||
entries: HashMap<IpAddr, SrttEntry>,
|
||||
enabled: bool,
|
||||
}
|
||||
|
||||
impl Default for SrttCache {
|
||||
fn default() -> Self {
|
||||
Self::new(true)
|
||||
}
|
||||
}
|
||||
|
||||
impl SrttCache {
|
||||
pub fn new(enabled: bool) -> Self {
|
||||
Self {
|
||||
entries: HashMap::new(),
|
||||
enabled,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_enabled(&self) -> bool {
|
||||
self.enabled
|
||||
}
|
||||
|
||||
/// Get current SRTT for an IP, applying decay if stale. Returns INITIAL for unknown.
|
||||
pub fn get(&self, ip: IpAddr) -> u64 {
|
||||
match self.entries.get(&ip) {
|
||||
Some(entry) => Self::decayed_srtt(entry),
|
||||
None => INITIAL_SRTT_MS,
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply time-based decay: each DECAY_AFTER_SECS period halves distance to INITIAL.
|
||||
fn decayed_srtt(entry: &SrttEntry) -> u64 {
|
||||
Self::decay_for_age(entry.srtt_ms, entry.updated_at.elapsed().as_secs())
|
||||
}
|
||||
|
||||
fn decay_for_age(srtt_ms: u64, age_secs: u64) -> u64 {
|
||||
if age_secs > DECAY_AFTER_SECS {
|
||||
let periods = (age_secs / DECAY_AFTER_SECS).min(8);
|
||||
let mut srtt = srtt_ms;
|
||||
for _ in 0..periods {
|
||||
srtt = (srtt + INITIAL_SRTT_MS) / 2;
|
||||
}
|
||||
srtt
|
||||
} else {
|
||||
srtt_ms
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a successful query RTT. No-op when disabled.
|
||||
pub fn record_rtt(&mut self, ip: IpAddr, rtt_ms: u64, tcp: bool) {
|
||||
if !self.enabled {
|
||||
return;
|
||||
}
|
||||
let effective = if tcp { rtt_ms + TCP_PENALTY_MS } else { rtt_ms };
|
||||
self.maybe_evict();
|
||||
let entry = self.entries.entry(ip).or_insert(SrttEntry {
|
||||
srtt_ms: effective,
|
||||
updated_at: Instant::now(),
|
||||
});
|
||||
// Apply decay before EWMA so recovered servers aren't stuck at stale penalties
|
||||
let base = Self::decayed_srtt(entry);
|
||||
// BIND EWMA: new = (old * 7 + sample) / 8
|
||||
entry.srtt_ms = (base * 7 + effective) / 8;
|
||||
entry.updated_at = Instant::now();
|
||||
}
|
||||
|
||||
/// Record a failure (timeout or error). No-op when disabled.
|
||||
pub fn record_failure(&mut self, ip: IpAddr) {
|
||||
if !self.enabled {
|
||||
return;
|
||||
}
|
||||
self.maybe_evict();
|
||||
let entry = self.entries.entry(ip).or_insert(SrttEntry {
|
||||
srtt_ms: FAILURE_PENALTY_MS,
|
||||
updated_at: Instant::now(),
|
||||
});
|
||||
entry.srtt_ms = FAILURE_PENALTY_MS;
|
||||
entry.updated_at = Instant::now();
|
||||
}
|
||||
|
||||
/// Sort addresses by SRTT ascending (lowest/fastest first). No-op when disabled.
|
||||
pub fn sort_by_rtt(&self, addrs: &mut [SocketAddr]) {
|
||||
if !self.enabled {
|
||||
return;
|
||||
}
|
||||
addrs.sort_by_key(|a| self.get(a.ip()));
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let per_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<IpAddr>()
|
||||
+ std::mem::size_of::<SrttEntry>()
|
||||
+ 1;
|
||||
self.entries.capacity() * per_slot
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
fn maybe_evict(&mut self) {
|
||||
if self.entries.len() < MAX_ENTRIES {
|
||||
return;
|
||||
}
|
||||
// Batch eviction: remove the oldest EVICT_BATCH entries at once
|
||||
let mut by_age: Vec<IpAddr> = self.entries.keys().copied().collect();
|
||||
by_age.sort_by_key(|ip| self.entries[ip].updated_at);
|
||||
for ip in by_age.into_iter().take(EVICT_BATCH) {
|
||||
self.entries.remove(&ip);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
fn ip(last: u8) -> IpAddr {
|
||||
IpAddr::V4(Ipv4Addr::new(192, 0, 2, last))
|
||||
}
|
||||
|
||||
fn sock(last: u8) -> SocketAddr {
|
||||
SocketAddr::new(ip(last), 53)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unknown_returns_initial() {
|
||||
let cache = SrttCache::new(true);
|
||||
assert_eq!(cache.get(ip(1)), INITIAL_SRTT_MS);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ewma_converges() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
for _ in 0..20 {
|
||||
cache.record_rtt(ip(1), 100, false);
|
||||
}
|
||||
let srtt = cache.get(ip(1));
|
||||
assert!(srtt >= 98 && srtt <= 102, "srtt={}", srtt);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn failure_sets_penalty() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
cache.record_rtt(ip(1), 50, false);
|
||||
cache.record_failure(ip(1));
|
||||
assert_eq!(cache.get(ip(1)), FAILURE_PENALTY_MS);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tcp_penalty_added() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
for _ in 0..20 {
|
||||
cache.record_rtt(ip(1), 50, true);
|
||||
}
|
||||
let srtt = cache.get(ip(1));
|
||||
assert!(srtt >= 148 && srtt <= 152, "srtt={}", srtt);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sort_by_rtt_orders_correctly() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
for _ in 0..20 {
|
||||
cache.record_rtt(ip(1), 500, false);
|
||||
cache.record_rtt(ip(2), 100, false);
|
||||
cache.record_rtt(ip(3), 10, false);
|
||||
}
|
||||
let mut addrs = vec![sock(1), sock(2), sock(3)];
|
||||
cache.sort_by_rtt(&mut addrs);
|
||||
assert_eq!(addrs, vec![sock(3), sock(2), sock(1)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unknown_servers_sort_equal() {
|
||||
let cache = SrttCache::new(true);
|
||||
let mut addrs = vec![sock(1), sock(2), sock(3)];
|
||||
let original = addrs.clone();
|
||||
cache.sort_by_rtt(&mut addrs);
|
||||
assert_eq!(addrs, original);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn disabled_is_noop() {
|
||||
let mut cache = SrttCache::new(false);
|
||||
cache.record_rtt(ip(1), 50, false);
|
||||
cache.record_failure(ip(2));
|
||||
assert_eq!(cache.len(), 0);
|
||||
|
||||
let mut addrs = vec![sock(2), sock(1)];
|
||||
let original = addrs.clone();
|
||||
cache.sort_by_rtt(&mut addrs);
|
||||
assert_eq!(addrs, original);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_decay_within_threshold() {
|
||||
// At exactly DECAY_AFTER_SECS, no decay applied
|
||||
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS);
|
||||
assert_eq!(result, FAILURE_PENALTY_MS);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn one_decay_period() {
|
||||
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS + 1);
|
||||
let expected = (FAILURE_PENALTY_MS + INITIAL_SRTT_MS) / 2;
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_decay_periods() {
|
||||
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 4 + 1);
|
||||
let mut expected = FAILURE_PENALTY_MS;
|
||||
for _ in 0..4 {
|
||||
expected = (expected + INITIAL_SRTT_MS) / 2;
|
||||
}
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decay_caps_at_8_periods() {
|
||||
// 9 periods and 100 periods should produce the same result (capped at 8)
|
||||
let a = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 9 + 1);
|
||||
let b = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decay_converges_toward_initial() {
|
||||
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||
let diff = decayed.abs_diff(INITIAL_SRTT_MS);
|
||||
assert!(
|
||||
diff < 25,
|
||||
"expected near INITIAL_SRTT_MS, got {} (diff={})",
|
||||
decayed,
|
||||
diff
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn record_rtt_applies_decay_before_ewma() {
|
||||
// Verify decay is applied before EWMA in record_rtt by checking
|
||||
// that a saturated penalty + long age + new sample produces a low SRTT
|
||||
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 8);
|
||||
// EWMA: (decayed * 7 + 50) / 8
|
||||
let after_ewma = (decayed * 7 + 50) / 8;
|
||||
assert!(
|
||||
after_ewma < 500,
|
||||
"expected decay before EWMA, got srtt={}",
|
||||
after_ewma
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decay_reranks_stale_failures() {
|
||||
// After enough decay, a failed server (5000ms) converges toward
|
||||
// INITIAL (200ms), which is below a stable server at 300ms
|
||||
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||
assert!(
|
||||
decayed < 300,
|
||||
"expected decayed penalty ({}) < 300ms",
|
||||
decayed
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
let empty = cache.heap_bytes();
|
||||
for i in 1..=10u8 {
|
||||
cache.record_rtt(ip(i), 100, false);
|
||||
}
|
||||
assert!(cache.heap_bytes() > empty);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn eviction_removes_oldest() {
|
||||
let mut cache = SrttCache::new(true);
|
||||
for i in 0..MAX_ENTRIES {
|
||||
let octets = [
|
||||
10,
|
||||
((i >> 16) & 0xFF) as u8,
|
||||
((i >> 8) & 0xFF) as u8,
|
||||
(i & 0xFF) as u8,
|
||||
];
|
||||
cache.record_rtt(
|
||||
IpAddr::V4(Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3])),
|
||||
100,
|
||||
false,
|
||||
);
|
||||
}
|
||||
assert_eq!(cache.len(), MAX_ENTRIES);
|
||||
cache.record_rtt(ip(1), 100, false);
|
||||
// Batch eviction removes EVICT_BATCH entries
|
||||
assert!(cache.len() <= MAX_ENTRIES - EVICT_BATCH + 1);
|
||||
}
|
||||
}
|
||||
101
src/stats.rs
101
src/stats.rs
@@ -1,97 +1,9 @@
|
||||
use std::time::Instant;
|
||||
|
||||
/// Returns the process memory footprint in bytes, or 0 if unavailable.
|
||||
/// macOS: phys_footprint (matches Activity Monitor). Linux: RSS from /proc/self/statm.
|
||||
pub fn process_memory_bytes() -> usize {
|
||||
#[cfg(target_os = "macos")]
|
||||
{
|
||||
macos_rss()
|
||||
}
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
linux_rss()
|
||||
}
|
||||
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
|
||||
{
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
fn macos_rss() -> usize {
|
||||
use std::mem;
|
||||
extern "C" {
|
||||
fn mach_task_self() -> u32;
|
||||
fn task_info(
|
||||
target_task: u32,
|
||||
flavor: u32,
|
||||
task_info_out: *mut TaskVmInfo,
|
||||
task_info_count: *mut u32,
|
||||
) -> i32;
|
||||
}
|
||||
// Partial task_vm_info_data_t — only fields up to phys_footprint.
|
||||
#[repr(C)]
|
||||
struct TaskVmInfo {
|
||||
virtual_size: u64,
|
||||
region_count: i32,
|
||||
page_size: i32,
|
||||
resident_size: u64,
|
||||
resident_size_peak: u64,
|
||||
device: u64,
|
||||
device_peak: u64,
|
||||
internal: u64,
|
||||
internal_peak: u64,
|
||||
external: u64,
|
||||
external_peak: u64,
|
||||
reusable: u64,
|
||||
reusable_peak: u64,
|
||||
purgeable_volatile_pmap: u64,
|
||||
purgeable_volatile_resident: u64,
|
||||
purgeable_volatile_virtual: u64,
|
||||
compressed: u64,
|
||||
compressed_peak: u64,
|
||||
compressed_lifetime: u64,
|
||||
phys_footprint: u64,
|
||||
}
|
||||
const TASK_VM_INFO: u32 = 22;
|
||||
let mut info: TaskVmInfo = unsafe { mem::zeroed() };
|
||||
let mut count = (mem::size_of::<TaskVmInfo>() / mem::size_of::<u32>()) as u32;
|
||||
let kr = unsafe { task_info(mach_task_self(), TASK_VM_INFO, &mut info, &mut count) };
|
||||
if kr == 0 {
|
||||
info.phys_footprint as usize
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
fn linux_rss() -> usize {
|
||||
extern "C" {
|
||||
fn sysconf(name: i32) -> i64;
|
||||
}
|
||||
const SC_PAGESIZE: i32 = 30; // x86_64 + aarch64; differs on mips (28), sparc (29)
|
||||
let page_size = unsafe { sysconf(SC_PAGESIZE) };
|
||||
let page_size = if page_size > 0 {
|
||||
page_size as usize
|
||||
} else {
|
||||
4096
|
||||
};
|
||||
|
||||
if let Ok(statm) = std::fs::read_to_string("/proc/self/statm") {
|
||||
if let Some(rss_pages) = statm.split_whitespace().nth(1) {
|
||||
if let Ok(pages) = rss_pages.parse::<usize>() {
|
||||
return pages * page_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
pub struct ServerStats {
|
||||
queries_total: u64,
|
||||
queries_forwarded: u64,
|
||||
queries_recursive: u64,
|
||||
queries_coalesced: u64,
|
||||
queries_cached: u64,
|
||||
queries_blocked: u64,
|
||||
queries_local: u64,
|
||||
@@ -100,13 +12,12 @@ pub struct ServerStats {
|
||||
started_at: Instant,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
pub enum QueryPath {
|
||||
Local,
|
||||
Cached,
|
||||
Forwarded,
|
||||
Recursive,
|
||||
Coalesced,
|
||||
Blocked,
|
||||
Overridden,
|
||||
UpstreamError,
|
||||
@@ -119,7 +30,6 @@ impl QueryPath {
|
||||
QueryPath::Cached => "CACHED",
|
||||
QueryPath::Forwarded => "FORWARD",
|
||||
QueryPath::Recursive => "RECURSIVE",
|
||||
QueryPath::Coalesced => "COALESCED",
|
||||
QueryPath::Blocked => "BLOCKED",
|
||||
QueryPath::Overridden => "OVERRIDE",
|
||||
QueryPath::UpstreamError => "SERVFAIL",
|
||||
@@ -135,8 +45,6 @@ impl QueryPath {
|
||||
Some(QueryPath::Forwarded)
|
||||
} else if s.eq_ignore_ascii_case("RECURSIVE") {
|
||||
Some(QueryPath::Recursive)
|
||||
} else if s.eq_ignore_ascii_case("COALESCED") {
|
||||
Some(QueryPath::Coalesced)
|
||||
} else if s.eq_ignore_ascii_case("BLOCKED") {
|
||||
Some(QueryPath::Blocked)
|
||||
} else if s.eq_ignore_ascii_case("OVERRIDE") {
|
||||
@@ -161,7 +69,6 @@ impl ServerStats {
|
||||
queries_total: 0,
|
||||
queries_forwarded: 0,
|
||||
queries_recursive: 0,
|
||||
queries_coalesced: 0,
|
||||
queries_cached: 0,
|
||||
queries_blocked: 0,
|
||||
queries_local: 0,
|
||||
@@ -178,7 +85,6 @@ impl ServerStats {
|
||||
QueryPath::Cached => self.queries_cached += 1,
|
||||
QueryPath::Forwarded => self.queries_forwarded += 1,
|
||||
QueryPath::Recursive => self.queries_recursive += 1,
|
||||
QueryPath::Coalesced => self.queries_coalesced += 1,
|
||||
QueryPath::Blocked => self.queries_blocked += 1,
|
||||
QueryPath::Overridden => self.queries_overridden += 1,
|
||||
QueryPath::UpstreamError => self.upstream_errors += 1,
|
||||
@@ -200,7 +106,6 @@ impl ServerStats {
|
||||
total: self.queries_total,
|
||||
forwarded: self.queries_forwarded,
|
||||
recursive: self.queries_recursive,
|
||||
coalesced: self.queries_coalesced,
|
||||
cached: self.queries_cached,
|
||||
local: self.queries_local,
|
||||
overridden: self.queries_overridden,
|
||||
@@ -216,12 +121,11 @@ impl ServerStats {
|
||||
let secs = uptime.as_secs() % 60;
|
||||
|
||||
log::info!(
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {}",
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | recursive {} | cached {} | local {} | override {} | blocked {} | errors {}",
|
||||
hours, mins, secs,
|
||||
self.queries_total,
|
||||
self.queries_forwarded,
|
||||
self.queries_recursive,
|
||||
self.queries_coalesced,
|
||||
self.queries_cached,
|
||||
self.queries_local,
|
||||
self.queries_overridden,
|
||||
@@ -236,7 +140,6 @@ pub struct StatsSnapshot {
|
||||
pub total: u64,
|
||||
pub forwarded: u64,
|
||||
pub recursive: u64,
|
||||
pub coalesced: u64,
|
||||
pub cached: u64,
|
||||
pub local: u64,
|
||||
pub overridden: u64,
|
||||
|
||||
1241
src/system_dns.rs
1241
src/system_dns.rs
File diff suppressed because it is too large
Load Diff
30
src/tls.rs
30
src/tls.rs
@@ -13,13 +13,6 @@ use time::{Duration, OffsetDateTime};
|
||||
const CA_VALIDITY_DAYS: i64 = 3650; // 10 years
|
||||
const CERT_VALIDITY_DAYS: i64 = 365; // 1 year
|
||||
|
||||
/// Common Name on Numa's local CA. Referenced by trust-store helpers
|
||||
/// (`security`, `certutil`) when locating the cert for removal.
|
||||
pub const CA_COMMON_NAME: &str = "Numa Local CA";
|
||||
|
||||
/// Filename of the CA certificate inside the data dir.
|
||||
pub const CA_FILE_NAME: &str = "ca.pem";
|
||||
|
||||
/// Collect all service + LAN peer names and regenerate the TLS cert.
|
||||
pub fn regenerate_tls(ctx: &ServerCtx) {
|
||||
let tls = match &ctx.tls_config {
|
||||
@@ -31,7 +24,7 @@ pub fn regenerate_tls(ctx: &ServerCtx) {
|
||||
names.extend(ctx.lan_peers.lock().unwrap().names());
|
||||
let names: Vec<String> = names.into_iter().collect();
|
||||
|
||||
match build_tls_config(&ctx.proxy_tld, &names, Vec::new(), &ctx.data_dir) {
|
||||
match build_tls_config(&ctx.proxy_tld, &names) {
|
||||
Ok(new_config) => {
|
||||
tls.store(new_config);
|
||||
info!("TLS cert regenerated for {} services", names.len());
|
||||
@@ -43,26 +36,17 @@ pub fn regenerate_tls(ctx: &ServerCtx) {
|
||||
/// Build a TLS config with a cert covering all provided service names.
|
||||
/// Wildcards under single-label TLDs (*.numa) are rejected by browsers,
|
||||
/// so we list each service explicitly as a SAN.
|
||||
/// `alpn` is advertised in the TLS ServerHello — pass empty for the proxy
|
||||
/// (which accepts any ALPN), or `[b"dot"]` for DoT (RFC 7858 §3.2).
|
||||
/// `data_dir` is where the CA material is stored — taken from
|
||||
/// `[server] data_dir` in numa.toml (defaults to `crate::data_dir()`).
|
||||
pub fn build_tls_config(
|
||||
tld: &str,
|
||||
service_names: &[String],
|
||||
alpn: Vec<Vec<u8>>,
|
||||
data_dir: &Path,
|
||||
) -> crate::Result<Arc<ServerConfig>> {
|
||||
let (ca_cert, ca_key) = ensure_ca(data_dir)?;
|
||||
pub fn build_tls_config(tld: &str, service_names: &[String]) -> crate::Result<Arc<ServerConfig>> {
|
||||
let dir = crate::data_dir();
|
||||
let (ca_cert, ca_key) = ensure_ca(&dir)?;
|
||||
let (cert_chain, key) = generate_service_cert(&ca_cert, &ca_key, tld, service_names)?;
|
||||
|
||||
// Ensure a crypto provider is installed (rustls needs one)
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
let mut config = ServerConfig::builder()
|
||||
let config = ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(cert_chain, key)?;
|
||||
config.alpn_protocols = alpn;
|
||||
|
||||
info!(
|
||||
"TLS configured for {} .{} domains",
|
||||
@@ -74,7 +58,7 @@ pub fn build_tls_config(
|
||||
|
||||
fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||
let ca_key_path = dir.join("ca.key");
|
||||
let ca_cert_path = dir.join(CA_FILE_NAME);
|
||||
let ca_cert_path = dir.join("ca.pem");
|
||||
|
||||
if ca_key_path.exists() && ca_cert_path.exists() {
|
||||
let key_pem = std::fs::read_to_string(&ca_key_path)?;
|
||||
@@ -93,7 +77,7 @@ fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||
let mut params = CertificateParams::default();
|
||||
params
|
||||
.distinguished_name
|
||||
.push(DnType::CommonName, CA_COMMON_NAME);
|
||||
.push(DnType::CommonName, "Numa Local CA");
|
||||
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
||||
params.key_usages = vec![KeyUsagePurpose::KeyCertSign, KeyUsagePurpose::CrlSign];
|
||||
params.not_before = OffsetDateTime::now_utc();
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Cross-distro CA trust contract test for issue #35.
|
||||
#
|
||||
# Runs the exact shell commands `src/system_dns.rs::trust_ca_linux` would run
|
||||
# on each Linux trust-store family (Debian, Fedora pki, Arch p11-kit), and
|
||||
# asserts the certificate ends up in (and is removed from) the system bundle.
|
||||
#
|
||||
# This is a contract test, not an integration test: it doesn't drive the Rust
|
||||
# code (that would need systemd-in-container). It verifies the assumptions in
|
||||
# `LINUX_TRUST_STORES` against the real distro behavior. If you change that
|
||||
# table in src/system_dns.rs, update the per-distro cases below to match.
|
||||
#
|
||||
# Requirements: docker, openssl (host).
|
||||
# Usage: ./tests/docker/install-trust.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
# Self-signed CA fixture, mounted into each container as ca.pem.
|
||||
# basicConstraints=CA:TRUE is required — without it, Debian's
|
||||
# update-ca-certificates silently skips the cert during bundle build.
|
||||
FIXTURE_DIR=$(mktemp -d)
|
||||
trap 'rm -rf "$FIXTURE_DIR"' EXIT
|
||||
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||
-keyout "$FIXTURE_DIR/ca.key" \
|
||||
-out "$FIXTURE_DIR/ca.pem" \
|
||||
-subj "/CN=Numa Local CA Test $(date +%s)" \
|
||||
-addext "basicConstraints=critical,CA:TRUE" \
|
||||
-addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
|
||||
|
||||
# Distro bundles store certs differently — Debian writes raw PEM only,
|
||||
# Fedora prepends "# CN" comment headers, Arch via extract-compat is
|
||||
# raw PEM. To detect cert presence uniformly we grep for a deterministic
|
||||
# substring of the base64 body (first base64 line is unique per cert).
|
||||
CERT_TAG=$(sed -n '2p' "$FIXTURE_DIR/ca.pem")
|
||||
|
||||
PASSED=0; FAILED=0
|
||||
|
||||
run_case() {
|
||||
local distro="$1"; shift
|
||||
local image="$1"; shift
|
||||
local platform="$1"; shift
|
||||
local script="$1"
|
||||
|
||||
printf "── %s (%s) ──\n" "$distro" "$image"
|
||||
if docker run --rm \
|
||||
--platform "$platform" \
|
||||
--security-opt seccomp=unconfined \
|
||||
-e CERT_TAG="$CERT_TAG" \
|
||||
-e DEBIAN_FRONTEND=noninteractive \
|
||||
-v "$FIXTURE_DIR/ca.pem:/fixture/ca.pem:ro" \
|
||||
"$image" bash -c "$script"; then
|
||||
printf "${GREEN}✓${RESET} %s\n\n" "$distro"
|
||||
PASSED=$((PASSED + 1))
|
||||
else
|
||||
printf "${RED}✗${RESET} %s\n\n" "$distro"
|
||||
FAILED=$((FAILED + 1))
|
||||
fi
|
||||
}
|
||||
|
||||
# Debian / Ubuntu / Mint — anchor: /usr/local/share/ca-certificates/*.crt
|
||||
run_case "debian" "debian:stable" "linux/amd64" '
|
||||
set -e
|
||||
apt-get update -qq
|
||||
apt-get install -qq -y ca-certificates >/dev/null
|
||||
install -m 0644 /fixture/ca.pem /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||
update-ca-certificates >/dev/null 2>&1
|
||||
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||
echo " install: cert present in bundle"
|
||||
rm /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||
update-ca-certificates --fresh >/dev/null 2>&1
|
||||
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
# Fedora / RHEL / CentOS / SUSE — anchor: /etc/pki/ca-trust/source/anchors/*.pem
|
||||
run_case "fedora" "fedora:latest" "linux/amd64" '
|
||||
set -e
|
||||
dnf install -q -y ca-certificates >/dev/null
|
||||
install -m 0644 /fixture/ca.pem /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||
update-ca-trust extract
|
||||
grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
||||
echo " install: cert present in bundle"
|
||||
rm /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||
update-ca-trust extract
|
||||
if grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
# Arch / Manjaro — anchor: /etc/ca-certificates/trust-source/anchors/*.pem
|
||||
# archlinux:latest is x86_64-only; --platform forces emulation on Apple Silicon.
|
||||
run_case "arch" "archlinux:latest" "linux/amd64" '
|
||||
set -e
|
||||
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu emulation.
|
||||
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||
pacman -Sy --noconfirm --needed ca-certificates p11-kit >/dev/null 2>&1
|
||||
install -m 0644 /fixture/ca.pem /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||
trust extract-compat
|
||||
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||
echo " install: cert present in bundle"
|
||||
rm /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||
trust extract-compat
|
||||
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
printf "── summary ──\n"
|
||||
printf " ${GREEN}passed${RESET}: %d\n" "$PASSED"
|
||||
printf " ${RED}failed${RESET}: %d\n" "$FAILED"
|
||||
[ "$FAILED" -eq 0 ]
|
||||
@@ -1,147 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Arch Linux compatibility smoke test.
|
||||
#
|
||||
# Builds numa from source inside an archlinux:latest container, runs it
|
||||
# in forward mode on port 5354, and verifies a single DNS query returns
|
||||
# an A record. Validates the "Arch compatible" claim end-to-end before
|
||||
# release announcements.
|
||||
#
|
||||
# Dogfooding: the test numa forwards to the host's running numa via
|
||||
# host.docker.internal (Docker Desktop's host gateway). This avoids the
|
||||
# Docker NAT/UDP issues with public resolvers and exercises the realistic
|
||||
# numa-on-numa shape. Requires the host to be running numa on port 53.
|
||||
#
|
||||
# First run is slow (~8-12 min): image pull + pacman + cold cargo build.
|
||||
# No caching across runs.
|
||||
#
|
||||
# Requirements: docker, host running numa on 0.0.0.0:53
|
||||
# Usage: ./tests/docker/smoke-arch.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
# Precondition: the test numa-on-arch forwards to the host numa as its
|
||||
# upstream (dogfood pattern). Fail fast with a clear error if there is
|
||||
# no working DNS on the host, rather than letting the dig inside the
|
||||
# container time out with "deadline has elapsed".
|
||||
if ! dig @127.0.0.1 google.com A +short +time=1 +tries=1 >/dev/null 2>&1; then
|
||||
printf "${RED}error:${RESET} host numa is not answering on 127.0.0.1:53\n" >&2
|
||||
echo " This test forwards to the host numa via host.docker.internal." >&2
|
||||
echo " Start numa on the host first (sudo numa install), then rerun." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "── building + running numa on archlinux:latest ──"
|
||||
echo " (first run is slow: image pull + pacman + cold cargo build, ~8-12 min)"
|
||||
echo
|
||||
|
||||
docker run --rm \
|
||||
--platform linux/amd64 \
|
||||
--security-opt seccomp=unconfined \
|
||||
-v "$PWD:/src:ro" \
|
||||
-v numa-arch-cargo:/root/.cargo \
|
||||
-v numa-arch-target:/work/target \
|
||||
archlinux:latest bash -c '
|
||||
set -e
|
||||
|
||||
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu
|
||||
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||
|
||||
echo "── pacman: installing build + runtime deps ──"
|
||||
pacman -Sy --noconfirm --needed rust gcc pkgconf cmake make perl bind 2>&1 | tail -3
|
||||
echo
|
||||
|
||||
# Copy source to a writable workdir, skipping target/ + .git so we
|
||||
# do not pull in the host (macOS) build artifacts.
|
||||
mkdir -p /work
|
||||
tar -C /src --exclude=./target --exclude=./.git -cf - . | tar -C /work -xf -
|
||||
cd /work
|
||||
|
||||
echo "── cargo build --release --locked ──"
|
||||
cargo build --release --locked 2>&1 | tail -5
|
||||
echo
|
||||
|
||||
# Dogfood: forward to the host numa via host.docker.internal.
|
||||
# numa parses upstream.address as a literal SocketAddr, so we resolve
|
||||
# the hostname to an IPv4 address first (force v4 — getent hosts may
|
||||
# return IPv6 first, and IPv6 addresses need bracketed addr:port form).
|
||||
HOST_IP=$(getent ahostsv4 host.docker.internal | awk "/STREAM/ {print \$1; exit}")
|
||||
if [ -z "$HOST_IP" ]; then
|
||||
echo " ✗ could not resolve host.docker.internal to IPv4 (not on Docker Desktop?)"
|
||||
exit 1
|
||||
fi
|
||||
echo "── starting numa on :5354 (forward to host numa at $HOST_IP:53) ──"
|
||||
# Intentionally NOT setting [server] data_dir — we want to exercise the
|
||||
# default code path (data_dir() → daemon_data_dir() → /var/lib/numa) so
|
||||
# the FHS-path assertion below verifies the live wiring, not just the
|
||||
# unit-tested helper.
|
||||
cat > /tmp/numa.toml <<EOF
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:5354"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "$HOST_IP"
|
||||
port = 53
|
||||
EOF
|
||||
|
||||
./target/release/numa /tmp/numa.toml > /tmp/numa.log 2>&1 &
|
||||
NUMA_PID=$!
|
||||
|
||||
# Poll for readiness — numa is ready when it answers a query
|
||||
READY=0
|
||||
for i in 1 2 3 4 5 6 7 8; do
|
||||
sleep 1
|
||||
if dig @127.0.0.1 -p 5354 google.com A +short +time=1 +tries=1 2>/dev/null \
|
||||
| grep -qE "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$"; then
|
||||
READY=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$READY" -ne 1 ]; then
|
||||
echo " ✗ numa did not return an A record after 8s"
|
||||
echo " numa log:"
|
||||
cat /tmp/numa.log
|
||||
kill $NUMA_PID 2>/dev/null || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "── dig @127.0.0.1 -p 5354 google.com A ──"
|
||||
ANSWER=$(dig @127.0.0.1 -p 5354 google.com A +short +time=2 +tries=1)
|
||||
echo "$ANSWER" | sed "s/^/ /"
|
||||
|
||||
kill $NUMA_PID 2>/dev/null || true
|
||||
|
||||
# FHS path assertion: the default data dir on Linux must be /var/lib/numa
|
||||
# (not the legacy /usr/local/var/numa). The CA cert generated at startup
|
||||
# is the canonical proof that numa wrote to the right place.
|
||||
echo
|
||||
echo "── FHS path check ──"
|
||||
if [ -f /var/lib/numa/ca.pem ]; then
|
||||
echo " ✓ CA cert at /var/lib/numa/ca.pem (FHS path)"
|
||||
else
|
||||
echo " ✗ CA cert NOT at /var/lib/numa/ca.pem"
|
||||
echo " ls /var/lib/numa/:"
|
||||
ls -la /var/lib/numa/ 2>&1 | sed "s/^/ /"
|
||||
echo " ls /usr/local/var/numa/:"
|
||||
ls -la /usr/local/var/numa/ 2>&1 | sed "s/^/ /"
|
||||
exit 1
|
||||
fi
|
||||
if [ -e /usr/local/var/numa ]; then
|
||||
echo " ✗ legacy path /usr/local/var/numa unexpectedly exists on a fresh container"
|
||||
exit 1
|
||||
fi
|
||||
echo " ✓ legacy path /usr/local/var/numa absent (fresh install used FHS)"
|
||||
|
||||
echo
|
||||
echo " ✓ numa built, ran, answered a forward query, and used the FHS data dir on Arch"
|
||||
'
|
||||
|
||||
echo
|
||||
printf "${GREEN}── smoke-arch passed ──${RESET}\n"
|
||||
@@ -404,241 +404,6 @@ check "Cache flushed" \
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
sleep 1
|
||||
|
||||
# ---- Suite 5: DNS-over-TLS (RFC 7858) ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 5: DNS-over-TLS (RFC 7858) ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
if ! command -v kdig >/dev/null 2>&1; then
|
||||
printf " ${DIM}skipped — install 'knot' for kdig${RESET}\n"
|
||||
elif ! command -v openssl >/dev/null 2>&1; then
|
||||
printf " ${DIM}skipped — openssl not found${RESET}\n"
|
||||
else
|
||||
DOT_PORT=8853
|
||||
DOT_CERT=/tmp/numa-integration-dot.crt
|
||||
DOT_KEY=/tmp/numa-integration-dot.key
|
||||
|
||||
# Generate a test cert mirroring production self_signed_tls SAN shape
|
||||
# (*.numa wildcard + explicit numa.numa apex).
|
||||
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||
-keyout "$DOT_KEY" -out "$DOT_CERT" \
|
||||
-subj "/CN=Numa .numa services" \
|
||||
-addext "subjectAltName=DNS:*.numa,DNS:numa.numa" \
|
||||
>/dev/null 2>&1
|
||||
|
||||
# Suite 5 uses a local zone so it's upstream-independent — the point is
|
||||
# to exercise the DoT transport layer (handshake, ALPN, framing,
|
||||
# persistent connections), not re-test recursive resolution.
|
||||
cat > "$CONFIG" << CONF
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:$PORT"
|
||||
api_port = $API_PORT
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "127.0.0.1"
|
||||
port = 65535
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
|
||||
[blocking]
|
||||
enabled = false
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
|
||||
[dot]
|
||||
enabled = true
|
||||
port = $DOT_PORT
|
||||
bind_addr = "127.0.0.1"
|
||||
cert_path = "$DOT_CERT"
|
||||
key_path = "$DOT_KEY"
|
||||
|
||||
[[zones]]
|
||||
domain = "dot-test.example"
|
||||
record_type = "A"
|
||||
value = "10.0.0.1"
|
||||
ttl = 60
|
||||
CONF
|
||||
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 4
|
||||
|
||||
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||
FAILED=$((FAILED + 1))
|
||||
printf " ${RED}✗${RESET} DoT startup\n"
|
||||
printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
|
||||
else
|
||||
echo ""
|
||||
echo "=== Listener ==="
|
||||
|
||||
check "DoT bound on 127.0.0.1:$DOT_PORT" \
|
||||
"DoT listening on 127.0.0.1:$DOT_PORT" \
|
||||
"$(grep 'DoT listening' "$LOG")"
|
||||
|
||||
KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$DOT_CERT +tls-hostname=numa.numa +time=5 +retry=0"
|
||||
|
||||
echo ""
|
||||
echo "=== Queries over DoT ==="
|
||||
|
||||
check "DoT local zone A record" \
|
||||
"10.0.0.1" \
|
||||
"$($KDIG +short dot-test.example A 2>/dev/null)"
|
||||
|
||||
# +keepopen reuses one TLS connection for multiple queries — tests
|
||||
# persistent connection handling. kdig applies options left-to-right,
|
||||
# so +short and +keepopen must come before the query specs.
|
||||
check "DoT persistent connection (3 queries, 1 handshake)" \
|
||||
"10.0.0.1" \
|
||||
"$($KDIG +keepopen +short dot-test.example A dot-test.example A dot-test.example A 2>/dev/null | head -1)"
|
||||
|
||||
echo ""
|
||||
echo "=== ALPN ==="
|
||||
|
||||
# Positive case: client offers "dot", server picks it.
|
||||
ALPN_OK=$(echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||
-servername numa.numa -alpn dot -CAfile "$DOT_CERT" 2>&1 </dev/null || true)
|
||||
check "DoT negotiates ALPN \"dot\"" \
|
||||
"ALPN protocol: dot" \
|
||||
"$ALPN_OK"
|
||||
|
||||
# Negative case: client offers only "h2", server must reject the
|
||||
# handshake with no_application_protocol alert (cross-protocol
|
||||
# confusion defense, RFC 7858bis §3.2).
|
||||
if echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||
-servername numa.numa -alpn h2 -CAfile "$DOT_CERT" \
|
||||
</dev/null >/dev/null 2>&1; then
|
||||
ALPN_MISMATCH="handshake unexpectedly succeeded"
|
||||
else
|
||||
ALPN_MISMATCH="rejected"
|
||||
fi
|
||||
check "DoT rejects non-dot ALPN" \
|
||||
"rejected" \
|
||||
"$ALPN_MISMATCH"
|
||||
fi
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
rm -f "$DOT_CERT" "$DOT_KEY"
|
||||
fi
|
||||
sleep 1
|
||||
|
||||
# ---- Suite 6: Proxy + DoT coexistence ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 6: Proxy + DoT Coexistence ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
if ! command -v kdig >/dev/null 2>&1 || ! command -v openssl >/dev/null 2>&1; then
|
||||
printf " ${DIM}skipped — needs kdig + openssl${RESET}\n"
|
||||
else
|
||||
DOT_PORT=8853
|
||||
PROXY_HTTP_PORT=8080
|
||||
PROXY_HTTPS_PORT=8443
|
||||
NUMA_DATA=/tmp/numa-integration-data
|
||||
|
||||
# Fresh data dir so we generate a fresh CA for this suite. Path is set
|
||||
# via [server] data_dir in the TOML below, not an env var — numa treats
|
||||
# its config file as the single source of truth for all knobs.
|
||||
rm -rf "$NUMA_DATA"
|
||||
mkdir -p "$NUMA_DATA"
|
||||
|
||||
cat > "$CONFIG" << CONF
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:$PORT"
|
||||
api_port = $API_PORT
|
||||
data_dir = "$NUMA_DATA"
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "127.0.0.1"
|
||||
port = 65535
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
|
||||
[blocking]
|
||||
enabled = false
|
||||
|
||||
[proxy]
|
||||
enabled = true
|
||||
port = $PROXY_HTTP_PORT
|
||||
tls_port = $PROXY_HTTPS_PORT
|
||||
tld = "numa"
|
||||
bind_addr = "127.0.0.1"
|
||||
|
||||
[dot]
|
||||
enabled = true
|
||||
port = $DOT_PORT
|
||||
bind_addr = "127.0.0.1"
|
||||
|
||||
[[zones]]
|
||||
domain = "dot-test.example"
|
||||
record_type = "A"
|
||||
value = "10.0.0.1"
|
||||
ttl = 60
|
||||
CONF
|
||||
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 4
|
||||
|
||||
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||
FAILED=$((FAILED + 1))
|
||||
printf " ${RED}✗${RESET} Startup with proxy + DoT\n"
|
||||
printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
|
||||
else
|
||||
echo ""
|
||||
echo "=== Both listeners ==="
|
||||
|
||||
check "DoT listener bound" \
|
||||
"DoT listening on 127.0.0.1:$DOT_PORT" \
|
||||
"$(grep 'DoT listening' "$LOG")"
|
||||
|
||||
check "HTTPS proxy listener bound" \
|
||||
"HTTPS proxy listening on 127.0.0.1:$PROXY_HTTPS_PORT" \
|
||||
"$(grep 'HTTPS proxy listening' "$LOG")"
|
||||
|
||||
PANIC_COUNT=$(grep -c 'panicked' "$LOG" 2>/dev/null || echo 0)
|
||||
check "No startup panics in log" \
|
||||
"^0$" \
|
||||
"$PANIC_COUNT"
|
||||
|
||||
echo ""
|
||||
echo "=== DoT works with proxy enabled ==="
|
||||
|
||||
# Proxy's build_tls_config runs first and creates the CA in
|
||||
# $NUMA_DATA_DIR. DoT self_signed_tls then loads the same CA and
|
||||
# issues its own leaf cert. One CA trusts both listeners.
|
||||
CA="$NUMA_DATA/ca.pem"
|
||||
KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$CA +tls-hostname=numa.numa +time=5 +retry=0"
|
||||
|
||||
check "DoT local zone A (with proxy on)" \
|
||||
"10.0.0.1" \
|
||||
"$($KDIG +short dot-test.example A 2>/dev/null)"
|
||||
|
||||
echo ""
|
||||
echo "=== Proxy TLS works with DoT enabled ==="
|
||||
|
||||
# Proxy cert has SAN numa.numa (auto-added "numa" service). A
|
||||
# successful handshake validates that the proxy's separate
|
||||
# ServerConfig wasn't disturbed by DoT's own cert generation.
|
||||
PROXY_TLS=$(echo "" | openssl s_client -connect "127.0.0.1:$PROXY_HTTPS_PORT" \
|
||||
-servername numa.numa -CAfile "$CA" 2>&1 </dev/null || true)
|
||||
check "Proxy HTTPS TLS handshake succeeds" \
|
||||
"Verify return code: 0 (ok)" \
|
||||
"$PROXY_TLS"
|
||||
fi
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
rm -rf "$NUMA_DATA"
|
||||
fi
|
||||
|
||||
# Summary
|
||||
echo ""
|
||||
|
||||
@@ -1,94 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Manual macOS CA trust contract test.
|
||||
#
|
||||
# Mirrors src/system_dns.rs::trust_ca_macos / untrust_ca_macos by running
|
||||
# the same `security` shell commands against a fixture cert with a unique
|
||||
# CN. Safe to run alongside a production numa install:
|
||||
#
|
||||
# - Test cert CN = "Numa Local CA Test <pid-ts>", always strictly longer
|
||||
# than the production CN "Numa Local CA". `security find-certificate -c`
|
||||
# does substring matching, so the test's search for $TEST_CN can never
|
||||
# match the production cert (the search term is longer than the prod CN).
|
||||
# - All deletes use `delete-certificate -Z <hash>`, which only touches the
|
||||
# cert with that exact hash. Production and test certs have different
|
||||
# hashes by construction (different key material), so the delete cannot
|
||||
# reach the production cert even if a CN search somehow returned both.
|
||||
#
|
||||
# Mutates the System keychain (briefly). Cleans up on success or interrupt.
|
||||
# Requires sudo for `security add-trusted-cert` and `delete-certificate`.
|
||||
#
|
||||
# Usage: ./tests/manual/install-trust-macos.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ "$OSTYPE" != darwin* ]]; then
|
||||
echo "This test is macOS-only." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
# Production constant from src/tls.rs::CA_COMMON_NAME — keep in sync.
|
||||
PROD_CN="Numa Local CA"
|
||||
KEYCHAIN="/Library/Keychains/System.keychain"
|
||||
|
||||
# Notice if production numa is already installed. We proceed regardless —
|
||||
# see header for why coexistence is safe (unique CN + by-hash deletion).
|
||||
if security find-certificate -c "$PROD_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||
echo " note: production '$PROD_CN' detected — proceeding alongside (test cert can't touch it)"
|
||||
echo
|
||||
fi
|
||||
|
||||
# Unique CN ensures the test cert can never collide with production.
|
||||
TEST_CN="Numa Local CA Test $$-$(date +%s)"
|
||||
FIXTURE_DIR=$(mktemp -d)
|
||||
|
||||
cleanup() {
|
||||
# Best-effort: remove any test certs by hash if still present.
|
||||
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||
echo " cleanup: removing leftover test cert"
|
||||
security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
|
||||
| awk '/^SHA-1 hash:/ {print $NF}' \
|
||||
| while read -r hash; do
|
||||
sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null 2>&1 || true
|
||||
done
|
||||
fi
|
||||
rm -rf "$FIXTURE_DIR"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
echo "── generating fixture CA ──"
|
||||
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||
-keyout "$FIXTURE_DIR/ca.key" \
|
||||
-out "$FIXTURE_DIR/ca.pem" \
|
||||
-subj "/CN=$TEST_CN" \
|
||||
-addext "basicConstraints=critical,CA:TRUE" \
|
||||
-addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
|
||||
echo " CN: $TEST_CN"
|
||||
echo
|
||||
|
||||
echo "── trust step (mirrors trust_ca_macos) ──"
|
||||
sudo security add-trusted-cert -d -r trustRoot -k "$KEYCHAIN" "$FIXTURE_DIR/ca.pem"
|
||||
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||
printf " ${GREEN}✓${RESET} test cert found in keychain\n"
|
||||
else
|
||||
printf " ${RED}✗${RESET} test cert NOT found after add-trusted-cert\n"
|
||||
exit 1
|
||||
fi
|
||||
echo
|
||||
|
||||
echo "── untrust step (mirrors untrust_ca_macos) ──"
|
||||
security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
|
||||
| awk '/^SHA-1 hash:/ {print $NF}' \
|
||||
| while read -r hash; do
|
||||
sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null
|
||||
done
|
||||
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||
printf " ${RED}✗${RESET} test cert STILL present after delete (regression)\n"
|
||||
exit 1
|
||||
fi
|
||||
printf " ${GREEN}✓${RESET} test cert removed from keychain\n"
|
||||
echo
|
||||
|
||||
printf "${GREEN}all checks passed${RESET}\n"
|
||||
Reference in New Issue
Block a user