Compare commits
20 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
120ba5200e | ||
|
|
45046bcf6e | ||
|
|
b4b939c78b | ||
|
|
9a85e271ec | ||
|
|
7dc1a0686f | ||
|
|
a02722cdf9 | ||
|
|
3b77dcff61 | ||
|
|
7cc110a0a1 | ||
|
|
75fe625f39 | ||
|
|
908d076d9b | ||
|
|
5381e65be4 | ||
|
|
6b0a30d004 | ||
|
|
169679bfe4 | ||
|
|
d3f046da4c | ||
|
|
0bdde40f40 | ||
|
|
155c1c4da0 | ||
|
|
b40004fe5e | ||
|
|
b8ddc16027 | ||
|
|
48f67be2f1 | ||
|
|
ca00846393 |
14
.github/workflows/ci.yml
vendored
14
.github/workflows/ci.yml
vendored
@@ -3,8 +3,22 @@ name: CI
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- 'site/**'
|
||||
- 'blog/**'
|
||||
- 'drafts/**'
|
||||
- '*.md'
|
||||
- 'scripts/serve-site.sh'
|
||||
- 'scripts/generate-blog-index.sh'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- 'site/**'
|
||||
- 'blog/**'
|
||||
- 'drafts/**'
|
||||
- '*.md'
|
||||
- 'scripts/serve-site.sh'
|
||||
- 'scripts/generate-blog-index.sh'
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
45
.github/workflows/docker.yml
vendored
Normal file
45
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: Docker
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: docker/metadata-action@v5
|
||||
id: meta
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest
|
||||
|
||||
- uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
7
.github/workflows/publish-aur.yml
vendored
7
.github/workflows/publish-aur.yml
vendored
@@ -23,6 +23,13 @@ name: Publish - Arch Linux AUR Package
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- 'site/**'
|
||||
- 'blog/**'
|
||||
- 'drafts/**'
|
||||
- '*.md'
|
||||
- 'scripts/serve-site.sh'
|
||||
- 'scripts/generate-blog-index.sh'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -4,3 +4,5 @@ CLAUDE.md
|
||||
docs/
|
||||
site/blog/posts/
|
||||
ios/
|
||||
drafts/
|
||||
site/blog/index.html
|
||||
|
||||
2
Cargo.lock
generated
2
Cargo.lock
generated
@@ -1330,7 +1330,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "numa"
|
||||
version = "0.13.0"
|
||||
version = "0.13.1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"axum",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "numa"
|
||||
version = "0.13.0"
|
||||
version = "0.13.1"
|
||||
authors = ["razvandimescu <razvan@dimescu.com>"]
|
||||
edition = "2021"
|
||||
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||
|
||||
@@ -6,6 +6,7 @@ RUN mkdir src && echo 'fn main() {}' > src/main.rs && echo '' > src/lib.rs
|
||||
RUN cargo build --release 2>/dev/null || true
|
||||
RUN rm -rf src
|
||||
COPY src/ src/
|
||||
COPY benches/ benches/
|
||||
COPY site/ site/
|
||||
COPY numa.toml com.numa.dns.plist numa.service ./
|
||||
RUN touch src/main.rs src/lib.rs
|
||||
@@ -13,5 +14,6 @@ RUN cargo build --release
|
||||
|
||||
FROM alpine:3.23
|
||||
COPY --from=builder /app/target/release/numa /usr/local/bin/numa
|
||||
RUN mkdir -p /root/.config/numa && printf '[server]\napi_bind_addr = "0.0.0.0"\n\n[proxy]\nenabled = true\nbind_addr = "0.0.0.0"\n' > /root/.config/numa/numa.toml
|
||||
EXPOSE 53/udp 80/tcp 443/tcp 853/tcp 5380/tcp
|
||||
ENTRYPOINT ["numa"]
|
||||
|
||||
13
Makefile
13
Makefile
@@ -32,6 +32,19 @@ blog:
|
||||
pandoc "$$f" --template=site/blog-template.html -o "site/blog/posts/$$name.html"; \
|
||||
echo " $$f → site/blog/posts/$$name.html"; \
|
||||
done
|
||||
@scripts/generate-blog-index.sh
|
||||
|
||||
blog-drafts: blog
|
||||
@if [ -d drafts ] && ls drafts/*.md >/dev/null 2>&1; then \
|
||||
for f in drafts/*.md; do \
|
||||
name=$$(basename "$$f" .md); \
|
||||
pandoc "$$f" --template=site/blog-template.html -o "site/blog/posts/$$name.html"; \
|
||||
echo " $$f → site/blog/posts/$$name.html (draft)"; \
|
||||
done; \
|
||||
BLOG_INCLUDE_DRAFTS=1 scripts/generate-blog-index.sh; \
|
||||
else \
|
||||
echo " No drafts found"; \
|
||||
fi
|
||||
|
||||
release:
|
||||
ifndef VERSION
|
||||
|
||||
2
PKGBUILD
2
PKGBUILD
@@ -9,7 +9,7 @@ url="https://github.com/razvandimescu/numa"
|
||||
license=('MIT')
|
||||
options=('!lto')
|
||||
depends=('gcc-libs' 'glibc')
|
||||
makedepends=('cargo' 'git')
|
||||
makedepends=('cargo' 'git' 'llvm-libs')
|
||||
provides=("$_pkgname")
|
||||
conflicts=("$_pkgname")
|
||||
backup=('etc/numa.toml')
|
||||
|
||||
23
README.md
23
README.md
@@ -27,6 +27,9 @@ yay -S numa-git
|
||||
# Windows — download from GitHub Releases
|
||||
# All platforms
|
||||
cargo install numa
|
||||
|
||||
# Docker
|
||||
docker run -d --name numa --network host ghcr.io/razvandimescu/numa
|
||||
```
|
||||
|
||||
```bash
|
||||
@@ -102,6 +105,26 @@ From Machine B: `curl http://api.numa` → proxied to Machine A's port 8000. Ena
|
||||
|
||||
**Hub mode**: run one instance with `bind_addr = "0.0.0.0:53"` and point other devices' DNS to it — they get ad blocking + `.numa` resolution without installing anything.
|
||||
|
||||
## Docker
|
||||
|
||||
```bash
|
||||
# Recommended — host networking (Linux)
|
||||
docker run -d --name numa --network host ghcr.io/razvandimescu/numa
|
||||
|
||||
# Port mapping (macOS/Windows Docker Desktop)
|
||||
docker run -d --name numa -p 53:53/udp -p 53:53/tcp -p 5380:5380 ghcr.io/razvandimescu/numa
|
||||
```
|
||||
|
||||
Dashboard at `http://localhost:5380`. The image binds the API and proxy to `0.0.0.0` by default. Override with a custom config:
|
||||
|
||||
```bash
|
||||
docker run -d --name numa --network host \
|
||||
-v /path/to/numa.toml:/root/.config/numa/numa.toml \
|
||||
ghcr.io/razvandimescu/numa
|
||||
```
|
||||
|
||||
Multi-arch: `linux/amd64` and `linux/arm64`.
|
||||
|
||||
## How It Compares
|
||||
|
||||
| | Pi-hole | AdGuard Home | Unbound | Numa |
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: I Built a DNS Resolver from Scratch in Rust
|
||||
description: How DNS actually works at the wire level — label compression, TTL tricks, DoH, and what surprised me building a resolver with zero DNS libraries.
|
||||
date: March 2026
|
||||
date: 2026-03-20
|
||||
---
|
||||
|
||||
I wanted to understand how DNS actually works. Not the "it translates domain names to IP addresses" explanation — the actual bytes on the wire. What does a DNS packet look like? How does label compression work? Why is everything crammed into 512 bytes?
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: Implementing DNSSEC from Scratch in Rust
|
||||
description: Recursive resolution from root hints, chain-of-trust validation, NSEC/NSEC3 denial proofs, and what I learned implementing DNSSEC with zero DNS libraries.
|
||||
date: March 2026
|
||||
date: 2026-03-28
|
||||
---
|
||||
|
||||
In the [previous post](/blog/posts/dns-from-scratch.html) I covered how DNS works at the wire level — packet format, label compression, TTL caching, DoH. Numa was a forwarding resolver: it parsed packets, did useful things locally, and relayed the rest to Cloudflare or Quad9.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: DNS-over-TLS from Scratch in Rust
|
||||
description: Building RFC 7858 on top of rustls — length-prefix framing, ALPN cross-protocol defense, and two bugs that only the strict clients caught.
|
||||
date: April 2026
|
||||
date: 2026-04-06
|
||||
---
|
||||
|
||||
The [previous post](/blog/posts/dnssec-from-scratch.html) ended with "DoT — the last encrypted transport we don't support." This post is about building it.
|
||||
|
||||
171
blog/fixing-doh-tail-latency.md
Normal file
171
blog/fixing-doh-tail-latency.md
Normal file
@@ -0,0 +1,171 @@
|
||||
---
|
||||
title: Fixing DNS tail latency with a 5-line config and a 50-line function
|
||||
description: Periodic 40-140ms DoH spikes from hyper's dispatch channel. The fix was reqwest window tuning and request hedging — Dean & Barroso's "The Tail at Scale," applied to a DNS forwarder. Same ideas took cold recursive p99 from 2.3 seconds to 538ms.
|
||||
date: 2026-04-12
|
||||
---
|
||||
|
||||
If you're using reqwest for small HTTP/2 payloads, you probably have a tail latency problem you don't know about. Hyper's default flow control windows are 10,000× oversized for anything under 1 KB, and its dispatch channel adds periodic 40-140ms stalls that don't show up in median benchmarks.
|
||||
|
||||
I hit this building Numa's DoH forwarding path. Median was 10ms, mean was 23ms — the tail was dragging everything.
|
||||
|
||||
<div class="hero-metrics">
|
||||
<div class="metric-card">
|
||||
<div class="metric-vs">DoH forwarding p99</div>
|
||||
<div class="metric-value">113 → 71ms</div>
|
||||
<div class="metric-label">window tuning + request hedging</div>
|
||||
</div>
|
||||
<div class="metric-card">
|
||||
<div class="metric-vs">Cold recursive p99</div>
|
||||
<div class="metric-value">2.3s → 538ms</div>
|
||||
<div class="metric-label">NS caching, serve-stale, parallel queries</div>
|
||||
</div>
|
||||
<div class="metric-card">
|
||||
<div class="metric-vs">Forwarding σ</div>
|
||||
<div class="metric-value">31 → 13ms</div>
|
||||
<div class="metric-label">random spikes become parallel races</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The fix was a 5-line reqwest config and a 50-line hedging function. This post is also an advertisement for Dean & Barroso's 2013 paper ["The Tail at Scale"](https://research.google/pubs/pub40801/) — a decade-old idea that still demolishes dispatch spikes. The same ideas later took my cold recursive p99 from 2.3 seconds to 538ms.
|
||||
|
||||
---
|
||||
|
||||
## The cause: hyper's dispatch channel
|
||||
|
||||
Reqwest sits on top of hyper, which interposes an mpsc dispatch channel and a separate `ClientTask` between `.send()` and the h2 stream. I instrumented the forwarding path and confirmed: 100% of the spike time lives in the `send()` phase, and a parallel heartbeat task showed zero runtime lag during spikes. The tokio runtime was fine — the stall was internal to hyper's request scheduling.
|
||||
|
||||
Hickory-resolver doesn't have this issue. It holds `h2::SendRequest<Bytes>` directly and calls `ready().await; send_request()` in the caller's task — no channel, no scheduling dependency. I used it as a reference point throughout.
|
||||
|
||||
## Fix #1 — HTTP/2 window sizes
|
||||
|
||||
Reqwest inherits hyper's HTTP/2 defaults: 2 MB stream window, 5 MB connection window. For DNS responses (~200 bytes), that's ~10,000× oversized — unnecessary WINDOW_UPDATE frames, bloated bookkeeping on every poll, and different server-side scheduling behavior.
|
||||
|
||||
Setting both windows to the h2 spec default (64 KB) dropped my median from 13.3ms to 10.1ms:
|
||||
|
||||
```rust
|
||||
reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.http2_initial_stream_window_size(65_535)
|
||||
.http2_initial_connection_window_size(65_535)
|
||||
.http2_keep_alive_interval(Duration::from_secs(15))
|
||||
.http2_keep_alive_while_idle(true)
|
||||
.http2_keep_alive_timeout(Duration::from_secs(10))
|
||||
.pool_idle_timeout(Duration::from_secs(300))
|
||||
.pool_max_idle_per_host(1)
|
||||
.build()
|
||||
```
|
||||
|
||||
**Any Rust code using reqwest for tiny-payload HTTP/2 workloads — DoH, API polling, metric scraping — is probably hitting this.**
|
||||
|
||||
## Fix #2 — Request hedging
|
||||
|
||||
["The Tail at Scale"](https://research.google/pubs/pub40801/) (Dean & Barroso, 2013): fire a request, and if it doesn't return within your P50 latency, fire the same request in parallel. First response wins.
|
||||
|
||||
The intuition: if 5% of requests spike due to independent random events, two parallel requests means only 0.25% of pairs spike on *both*. The tail collapses.
|
||||
|
||||
**The surprise: hedging against the same upstream works.** HTTP/2 multiplexes streams — two `send_request()` calls on one connection become independent h2 streams. If one stalls in the dispatch channel, the other keeps making progress.
|
||||
|
||||
```rust
|
||||
pub async fn forward_with_hedging_raw(
|
||||
wire: &[u8],
|
||||
primary: &Upstream,
|
||||
secondary: &Upstream,
|
||||
hedge_delay: Duration,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<Vec<u8>> {
|
||||
let primary_fut = forward_query_raw(wire, primary, timeout_duration);
|
||||
tokio::pin!(primary_fut);
|
||||
let delay = sleep(hedge_delay);
|
||||
tokio::pin!(delay);
|
||||
|
||||
// Phase 1: wait for primary to return OR the hedge delay.
|
||||
tokio::select! {
|
||||
result = &mut primary_fut => return result,
|
||||
_ = &mut delay => {}
|
||||
}
|
||||
|
||||
// Phase 2: hedge delay expired — fire secondary, keep primary alive.
|
||||
let secondary_fut = forward_query_raw(wire, secondary, timeout_duration);
|
||||
tokio::pin!(secondary_fut);
|
||||
|
||||
// First successful response wins.
|
||||
tokio::select! {
|
||||
r = primary_fut => r,
|
||||
r = secondary_fut => r,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The [production version](https://github.com/razvandimescu/numa/blob/main/src/forward.rs#L267) adds error handling — if one leg fails, it waits for the other. In production, Numa passes the same `&Upstream` twice when only one is configured. I extended hedging to all protocols — UDP (rescues packet loss on WiFi), DoT (rescues TLS handshake stalls). Configurable via `hedge_ms`; set to 0 to disable.
|
||||
|
||||
**Caveat: hedging hurts on degraded networks.** When latency is consistently high (no random spikes, just slow), the hedge adds overhead with nothing to rescue. Hedging is a variance reducer, not a latency reducer — it only helps when spikes are *random*.
|
||||
|
||||
---
|
||||
|
||||
## Forwarding results
|
||||
|
||||
5 iterations × 101 domains × 10 rounds, 5,050 samples per method. Hickory-resolver included as a reference (it uses h2 directly, no dispatch channel):
|
||||
|
||||
| | Single | **Hedged** | Hickory (ref) |
|
||||
|---|---|---|---|
|
||||
| mean | 17.4ms | **14.3ms** | 16.8ms |
|
||||
| median | 10.4ms | **10.2ms** | 13.3ms |
|
||||
| p95 | 52.5ms | **28.6ms** | 37.7ms |
|
||||
| p99 | 113.4ms | **71.3ms** | 98.1ms |
|
||||
| σ | 30.6ms | **13.2ms** | 19.1ms |
|
||||
|
||||
The internal improvement: hedging cut p95 by 45%, p99 by 37%, σ by 57%. The exact margin vs hickory varies with network conditions; the σ reduction is consistent across runs.
|
||||
|
||||
## Recursive resolution: from 2.3 seconds to 538ms
|
||||
|
||||
Forwarding is one job. Recursive resolution — walking from root hints through TLD nameservers to the authoritative server — is a different one. I started 15× behind Unbound on cold recursive p99 and traced it to four root causes.
|
||||
|
||||
**1. Missing NS delegation caching.** I cached glue records (ns1's IP) but not the delegation itself. Every `.com` query walked from root. Fix: cache NS records from referral authority sections. (10 lines)
|
||||
|
||||
**2. Expired cache entries caused full cold resolutions.** Fix: serve-stale ([RFC 8767](https://www.rfc-editor.org/rfc/rfc8767)) — return expired entries with TTL=1 while revalidating in the background. (20 lines)
|
||||
|
||||
**3. Wasting 1,900ms per unreachable server.** 800ms UDP timeout + unconditional 1,500ms TCP fallback. Fix: 400ms UDP, TCP only for truncation. (5 lines)
|
||||
|
||||
**4. Sequential NS queries on cold starts.** Fix: fire to the top 2 nameservers simultaneously. First response wins, SRTT recorded for both. Same hedging principle. (50 lines)
|
||||
|
||||
<div class="before-after">
|
||||
<div class="ba-item">
|
||||
<div class="ba-label">p99 before</div>
|
||||
<div class="ba-value ba-before">2,367ms</div>
|
||||
</div>
|
||||
<div class="ba-arrow">→</div>
|
||||
<div class="ba-item">
|
||||
<div class="ba-label">p99 after</div>
|
||||
<div class="ba-value ba-after">538ms</div>
|
||||
</div>
|
||||
<div class="ba-item ba-ref">
|
||||
<div class="ba-label">Unbound (ref)</div>
|
||||
<div class="ba-value">748ms</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Genuine cold benchmarks — unique subdomains, 1 query per domain, 5 iterations, 505 samples per server:
|
||||
|
||||
| | Baseline | Final | Unbound (ref) |
|
||||
|---|---|---|---|
|
||||
| p99 | 2,367ms | **538ms** | 748ms |
|
||||
| σ | 254ms | **114ms** | 457ms |
|
||||
| median | — | 77.6ms | 74.7ms |
|
||||
|
||||
Unbound wins median by ~4%. Where hedging shines is the tail — domains with slow or unreachable nameservers, where parallel queries turn worst-case sequential timeouts into races. Cache hits are tied at 0.1ms across Numa, Unbound, and AdGuard Home.
|
||||
|
||||
What I'm exploring next: persistent SRTT data across restarts (currently cold-starts lose all server timing), aggressive NSEC caching to shortcut negative lookups, and adaptive hedge delays that tune themselves to observed network conditions instead of a fixed 10ms.
|
||||
|
||||
---
|
||||
|
||||
## Takeaways
|
||||
|
||||
The real hero of this post is Dean & Barroso. Hedging works because **spikes are random, and two random draws rarely both lose**. It's effective for any HTTP/2 client, any language, any forwarder topology. Nobody we know of ships it by default.
|
||||
|
||||
If you're building a Rust service that makes many small HTTP/2 requests to the same backend: check your flow control window sizes first, then implement hedging. Don't rewrite the client.
|
||||
|
||||
Benchmarks are in [`benches/recursive_compare.rs`](https://github.com/razvandimescu/numa/blob/main/benches/recursive_compare.rs) — run them yourself. If you're using reqwest for tiny-payload workloads and try the window size fix, I'd love to hear if you see the same improvement.
|
||||
|
||||
---
|
||||
|
||||
Numa is a DNS resolver that runs on your laptop or phone. DoH, DoT, .numa local domains, ad blocking, developer overrides, a REST API, and all the optimization work in this post. [github.com/razvandimescu/numa](https://github.com/razvandimescu/numa).
|
||||
@@ -58,6 +58,14 @@ api_port = 5380
|
||||
# [[forwarding]]
|
||||
# suffix = ["home.local", "home.arpa"] # multiple suffixes → same upstream
|
||||
# upstream = "10.0.0.1" # port 53 default
|
||||
#
|
||||
# [[forwarding]] # DoT upstream: tls://IP[:port]#hostname
|
||||
# suffix = ["google.com", "goog"] # hostname is the TLS SNI / cert name
|
||||
# upstream = "tls://9.9.9.9#dns.quad9.net" # port 853 default
|
||||
#
|
||||
# [[forwarding]] # DoH upstream: full https:// URL
|
||||
# suffix = "example.corp"
|
||||
# upstream = "https://dns.quad9.net/dns-query"
|
||||
|
||||
# [blocking]
|
||||
# enabled = true # set to false to disable ad blocking
|
||||
|
||||
239
scripts/generate-blog-index.sh
Executable file
239
scripts/generate-blog-index.sh
Executable file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Generate site/blog/index.html from blog/*.md frontmatter.
|
||||
# Reads title, description, date from YAML frontmatter in each post.
|
||||
# Sorts newest first by ISO date (YYYY-MM-DD — "2026-04-06" > "2026-03-28").
|
||||
|
||||
OUT="site/blog/index.html"
|
||||
|
||||
# Extract frontmatter fields from a markdown file
|
||||
extract() {
|
||||
local file="$1" field="$2"
|
||||
sed -n '/^---$/,/^---$/p' "$file" | grep "^${field}:" | sed "s/^${field}: *//"
|
||||
}
|
||||
|
||||
# Collect posts: "date|name|title|description" per line
|
||||
posts=""
|
||||
sources="blog/*.md"
|
||||
if [ "${BLOG_INCLUDE_DRAFTS:-}" = "1" ] && ls drafts/*.md >/dev/null 2>&1; then
|
||||
sources="blog/*.md drafts/*.md"
|
||||
fi
|
||||
for f in $sources; do
|
||||
name=$(basename "$f" .md)
|
||||
title=$(extract "$f" title)
|
||||
desc=$(extract "$f" description)
|
||||
date=$(extract "$f" date)
|
||||
posts+="${date}|${name}|${title}|${desc}"$'\n'
|
||||
done
|
||||
|
||||
# Sort by ISO date (YYYY-MM-DD), newest first
|
||||
posts=$(echo "$posts" | grep -v '^$' | sort -t'|' -k1 -r)
|
||||
|
||||
# Format ISO date (YYYY-MM-DD) to "Month YYYY"
|
||||
format_date() {
|
||||
local months=(January February March April May June July August September October November December)
|
||||
local y="${1%%-*}"
|
||||
local m="${1#*-}"; m="${m%%-*}"; m=$((10#$m))
|
||||
echo "${months[$((m-1))]} $y"
|
||||
}
|
||||
|
||||
# Generate post list items
|
||||
items=""
|
||||
while IFS='|' read -r date name title desc; do
|
||||
display_date=$(format_date "$date")
|
||||
items+=" <li>
|
||||
<a href=\"/blog/posts/${name}.html\">
|
||||
<div class=\"post-title\">${title}</div>
|
||||
<div class=\"post-desc\">${desc}</div>
|
||||
<div class=\"post-date\">${display_date}</div>
|
||||
</a>
|
||||
</li>
|
||||
"
|
||||
done <<< "$posts"
|
||||
|
||||
# Write the full index.html — style matches the existing hand-maintained version
|
||||
cat > "$OUT" << HTMLEOF
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Blog — Numa</title>
|
||||
<meta name="description" content="Technical writing about DNS, Rust, and building infrastructure from scratch.">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
<style>
|
||||
*, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
:root {
|
||||
--bg-deep: #f5f0e8;
|
||||
--bg-surface: #ece5da;
|
||||
--bg-card: #faf7f2;
|
||||
--amber: #c0623a;
|
||||
--amber-dim: #9e4e2d;
|
||||
--teal: #6b7c4e;
|
||||
--text-primary: #2c2418;
|
||||
--text-secondary: #6b5e4f;
|
||||
--text-dim: #a39888;
|
||||
--border: rgba(0, 0, 0, 0.08);
|
||||
--font-display: 'Instrument Serif', Georgia, serif;
|
||||
--font-body: 'DM Sans', system-ui, sans-serif;
|
||||
--font-mono: 'JetBrains Mono', monospace;
|
||||
}
|
||||
|
||||
body {
|
||||
background: var(--bg-deep);
|
||||
color: var(--text-primary);
|
||||
font-family: var(--font-body);
|
||||
font-weight: 400;
|
||||
line-height: 1.7;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
body::before {
|
||||
content: '';
|
||||
position: fixed;
|
||||
inset: 0;
|
||||
background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='n'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23n)' opacity='0.025'/%3E%3C/svg%3E");
|
||||
pointer-events: none;
|
||||
z-index: 9999;
|
||||
}
|
||||
|
||||
.blog-nav {
|
||||
padding: 1.5rem 2rem;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1.5rem;
|
||||
}
|
||||
|
||||
.blog-nav a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
.blog-nav a:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .wordmark {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 400;
|
||||
color: var(--text-primary);
|
||||
text-decoration: none;
|
||||
text-transform: none;
|
||||
letter-spacing: -0.02em;
|
||||
}
|
||||
.blog-nav .wordmark:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .sep {
|
||||
color: var(--text-dim);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
.blog-index {
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
padding: 3rem 2rem 6rem;
|
||||
}
|
||||
|
||||
.blog-index h1 {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 400;
|
||||
font-size: 2.5rem;
|
||||
margin-bottom: 3rem;
|
||||
}
|
||||
|
||||
.post-list {
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.post-list li {
|
||||
padding: 1.5rem 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.post-list li:first-child {
|
||||
border-top: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.post-list a {
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.post-list .post-title {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
line-height: 1.3;
|
||||
margin-bottom: 0.4rem;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
|
||||
.post-list a:hover .post-title {
|
||||
color: var(--amber);
|
||||
}
|
||||
|
||||
.post-list .post-desc {
|
||||
font-size: 0.95rem;
|
||||
color: var(--text-secondary);
|
||||
line-height: 1.5;
|
||||
margin-bottom: 0.4rem;
|
||||
}
|
||||
|
||||
.post-list .post-date {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
color: var(--text-dim);
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
|
||||
.blog-footer {
|
||||
text-align: center;
|
||||
padding: 3rem 2rem;
|
||||
border-top: 1px solid var(--border);
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.blog-footer a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
margin: 0 1rem;
|
||||
}
|
||||
.blog-footer a:hover { color: var(--amber); }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<nav class="blog-nav">
|
||||
<a href="/" class="wordmark">Numa</a>
|
||||
<span class="sep">/</span>
|
||||
<a href="/blog/">Blog</a>
|
||||
</nav>
|
||||
|
||||
<main class="blog-index">
|
||||
<h1>Blog</h1>
|
||||
<ul class="post-list">
|
||||
${items} </ul>
|
||||
</main>
|
||||
|
||||
<footer class="blog-footer">
|
||||
<a href="https://github.com/razvandimescu/numa">GitHub</a>
|
||||
<a href="/">Home</a>
|
||||
</footer>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
HTMLEOF
|
||||
|
||||
echo " blog/index.html generated ($(echo "$posts" | wc -l | tr -d ' ') posts)"
|
||||
14
scripts/serve-site.sh
Executable file
14
scripts/serve-site.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
PORT="${1:-9000}"
|
||||
|
||||
if [[ "${1:-}" == "--drafts" ]] || [[ "${2:-}" == "--drafts" ]]; then
|
||||
PORT="${PORT//--drafts/9000}" # default port if --drafts was first arg
|
||||
make blog-drafts
|
||||
else
|
||||
make blog
|
||||
fi
|
||||
|
||||
echo "Serving site at http://localhost:$PORT"
|
||||
cd site && python3 -m http.server "$PORT"
|
||||
@@ -267,9 +267,105 @@ body::before {
|
||||
.blog-footer a:hover { color: var(--amber); }
|
||||
|
||||
/* --- Responsive --- */
|
||||
/* Hero metrics cards */
|
||||
.hero-metrics {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
gap: 1rem;
|
||||
margin: 2rem 0;
|
||||
}
|
||||
.metric-card {
|
||||
background: var(--bg-card);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 6px;
|
||||
padding: 1.25rem;
|
||||
text-align: center;
|
||||
}
|
||||
.metric-vs {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.7rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
.metric-value {
|
||||
font-family: var(--font-display);
|
||||
font-size: 2.4rem;
|
||||
font-weight: 400;
|
||||
color: var(--amber);
|
||||
line-height: 1.1;
|
||||
}
|
||||
.metric-label {
|
||||
font-size: 0.82rem;
|
||||
color: var(--text-secondary);
|
||||
margin-top: 0.5rem;
|
||||
line-height: 1.3;
|
||||
}
|
||||
|
||||
/* Before/after progression */
|
||||
.before-after {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 1.5rem;
|
||||
margin: 2rem 0;
|
||||
padding: 1.5rem;
|
||||
background: var(--bg-card);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 6px;
|
||||
}
|
||||
.ba-item { text-align: center; }
|
||||
.ba-label {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.7rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
margin-bottom: 0.3rem;
|
||||
}
|
||||
.ba-value {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.8rem;
|
||||
font-weight: 400;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
.ba-before {
|
||||
text-decoration: line-through;
|
||||
text-decoration-color: rgba(192, 98, 58, 0.4);
|
||||
color: var(--text-dim);
|
||||
}
|
||||
.ba-after { color: var(--amber); }
|
||||
.ba-arrow { font-size: 1.5rem; color: var(--text-dim); }
|
||||
.ba-ref {
|
||||
border-left: 1px solid var(--border);
|
||||
padding-left: 1.5rem;
|
||||
}
|
||||
|
||||
/* Spike highlight */
|
||||
.spike {
|
||||
background: rgba(192, 98, 58, 0.12);
|
||||
padding: 0.15em 0.5em;
|
||||
border-radius: 3px;
|
||||
font-weight: 600;
|
||||
color: var(--amber-dim);
|
||||
}
|
||||
|
||||
/* Section dividers */
|
||||
.article hr {
|
||||
border: none;
|
||||
height: 1px;
|
||||
background: var(--border);
|
||||
margin: 3rem auto;
|
||||
max-width: 120px;
|
||||
}
|
||||
|
||||
@media (max-width: 640px) {
|
||||
.article { padding: 2rem 1.25rem 4rem; }
|
||||
.article pre { padding: 1rem; margin-left: -0.5rem; margin-right: -0.5rem; border-radius: 0; border-left: none; border-right: none; }
|
||||
.hero-metrics { grid-template-columns: 1fr; }
|
||||
.before-after { flex-direction: column; gap: 0.75rem; }
|
||||
.ba-ref { border-left: none; border-top: 1px solid var(--border); padding-left: 0; padding-top: 0.75rem; }
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
@@ -168,10 +168,17 @@ body::before {
|
||||
<main class="blog-index">
|
||||
<h1>Blog</h1>
|
||||
<ul class="post-list">
|
||||
<li>
|
||||
<a href="/blog/posts/fixing-doh-tail-latency.html">
|
||||
<div class="post-title">Fixing DNS tail latency with a 5-line config and a 50-line function</div>
|
||||
<div class="post-desc">Periodic 40-140ms DoH spikes from hyper's dispatch channel. The fix was reqwest window tuning and request hedging — Dean & Barroso's "The Tail at Scale," applied to a DNS forwarder. Same ideas took cold recursive p99 from 2.3 seconds to 538ms.</div>
|
||||
<div class="post-date">April 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="/blog/posts/dot-from-scratch.html">
|
||||
<div class="post-title">DNS-over-TLS from Scratch in Rust</div>
|
||||
<div class="post-desc">Building RFC 7858 on top of rustls — length-prefix framing, ALPN cross-protocol defense, iPhone dogfooding, and two bugs that only the strict clients caught.</div>
|
||||
<div class="post-desc">Building RFC 7858 on top of rustls — length-prefix framing, ALPN cross-protocol defense, and two bugs that only the strict clients caught.</div>
|
||||
<div class="post-date">April 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
@@ -185,7 +192,7 @@ body::before {
|
||||
<li>
|
||||
<a href="/blog/posts/dns-from-scratch.html">
|
||||
<div class="post-title">I Built a DNS Resolver from Scratch in Rust</div>
|
||||
<div class="post-desc">How DNS actually works at the wire level — label compression, TTL tricks, DoH implementation, and what I learned building a resolver with zero DNS libraries.</div>
|
||||
<div class="post-desc">How DNS actually works at the wire level — label compression, TTL tricks, DoH, and what surprised me building a resolver with zero DNS libraries.</div>
|
||||
<div class="post-date">March 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
|
||||
45
src/api.rs
45
src/api.rs
@@ -1020,53 +1020,10 @@ mod tests {
|
||||
use super::*;
|
||||
use axum::body::Body;
|
||||
use http::Request;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
use tower::ServiceExt;
|
||||
|
||||
async fn test_ctx() -> Arc<ServerCtx> {
|
||||
let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||
Arc::new(ServerCtx {
|
||||
socket,
|
||||
zone_map: std::collections::HashMap::new(),
|
||||
cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
|
||||
refreshing: Mutex::new(std::collections::HashSet::new()),
|
||||
stats: Mutex::new(crate::stats::ServerStats::new()),
|
||||
overrides: RwLock::new(crate::override_store::OverrideStore::new()),
|
||||
blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
|
||||
query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
|
||||
services: Mutex::new(crate::service_store::ServiceStore::new()),
|
||||
lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
|
||||
forwarding_rules: Vec::new(),
|
||||
upstream_pool: Mutex::new(crate::forward::UpstreamPool::new(
|
||||
vec![crate::forward::Upstream::Udp(
|
||||
"127.0.0.1:53".parse().unwrap(),
|
||||
)],
|
||||
vec![],
|
||||
)),
|
||||
upstream_auto: false,
|
||||
upstream_port: 53,
|
||||
lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
|
||||
timeout: std::time::Duration::from_secs(3),
|
||||
hedge_delay: std::time::Duration::ZERO,
|
||||
proxy_tld: "numa".to_string(),
|
||||
proxy_tld_suffix: ".numa".to_string(),
|
||||
lan_enabled: false,
|
||||
config_path: "/tmp/test-numa.toml".to_string(),
|
||||
config_found: false,
|
||||
config_dir: std::path::PathBuf::from("/tmp"),
|
||||
data_dir: std::path::PathBuf::from("/tmp"),
|
||||
tls_config: None,
|
||||
upstream_mode: crate::config::UpstreamMode::Forward,
|
||||
root_hints: Vec::new(),
|
||||
srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
|
||||
inflight: Mutex::new(std::collections::HashMap::new()),
|
||||
dnssec_enabled: false,
|
||||
dnssec_strict: false,
|
||||
health_meta: crate::health::HealthMeta::test_fixture(),
|
||||
ca_pem: None,
|
||||
mobile_enabled: false,
|
||||
mobile_port: 8765,
|
||||
})
|
||||
Arc::new(crate::testutil::test_ctx().await)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -46,12 +46,12 @@ pub struct ForwardingRuleConfig {
|
||||
|
||||
impl ForwardingRuleConfig {
|
||||
fn to_runtime_rules(&self) -> Result<Vec<crate::system_dns::ForwardingRule>> {
|
||||
let addr = crate::forward::parse_upstream_addr(&self.upstream, 53)
|
||||
let upstream = crate::forward::parse_upstream(&self.upstream, 53)
|
||||
.map_err(|e| format!("forwarding rule for upstream '{}': {}", self.upstream, e))?;
|
||||
Ok(self
|
||||
.suffix
|
||||
.iter()
|
||||
.map(|s| crate::system_dns::ForwardingRule::new(s.clone(), addr))
|
||||
.map(|s| crate::system_dns::ForwardingRule::new(s.clone(), upstream.clone()))
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
@@ -710,6 +710,10 @@ mod tests {
|
||||
};
|
||||
let runtime = rule.to_runtime_rules().unwrap();
|
||||
assert_eq!(runtime.len(), 1);
|
||||
assert!(matches!(
|
||||
runtime[0].upstream,
|
||||
crate::forward::Upstream::Udp(_)
|
||||
));
|
||||
assert_eq!(runtime[0].upstream.to_string(), "100.90.1.63:5361");
|
||||
assert_eq!(runtime[0].suffix, "home.local");
|
||||
}
|
||||
@@ -733,6 +737,38 @@ mod tests {
|
||||
assert!(rule.to_runtime_rules().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn forwarding_upstream_accepts_dot_scheme() {
|
||||
let rule = ForwardingRuleConfig {
|
||||
suffix: vec!["google.com".to_string()],
|
||||
upstream: "tls://9.9.9.9#dns.quad9.net".to_string(),
|
||||
};
|
||||
let runtime = rule
|
||||
.to_runtime_rules()
|
||||
.expect("tls:// upstream should parse");
|
||||
assert_eq!(runtime.len(), 1);
|
||||
assert_eq!(
|
||||
runtime[0].upstream.to_string(),
|
||||
"tls://9.9.9.9:853#dns.quad9.net"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn forwarding_upstream_accepts_doh_scheme() {
|
||||
let rule = ForwardingRuleConfig {
|
||||
suffix: vec!["goog".to_string()],
|
||||
upstream: "https://dns.quad9.net/dns-query".to_string(),
|
||||
};
|
||||
let runtime = rule
|
||||
.to_runtime_rules()
|
||||
.expect("https:// upstream should parse");
|
||||
assert_eq!(runtime.len(), 1);
|
||||
assert_eq!(
|
||||
runtime[0].upstream.to_string(),
|
||||
"https://dns.quad9.net/dns-query"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn forwarding_config_rules_take_precedence_over_discovered() {
|
||||
let config_rules = vec![ForwardingRuleConfig {
|
||||
@@ -741,7 +777,7 @@ mod tests {
|
||||
}];
|
||||
let discovered = vec![crate::system_dns::ForwardingRule::new(
|
||||
"home.local".to_string(),
|
||||
"192.168.1.1:53".parse().unwrap(),
|
||||
crate::forward::Upstream::Udp("192.168.1.1:53".parse().unwrap()),
|
||||
)];
|
||||
let merged = merge_forwarding_rules(&config_rules, discovered).unwrap();
|
||||
let picked = crate::system_dns::match_forwarding_rule("host.home.local", &merged)
|
||||
@@ -757,7 +793,7 @@ mod tests {
|
||||
}];
|
||||
let discovered = vec![crate::system_dns::ForwardingRule::new(
|
||||
"corp.example".to_string(),
|
||||
"192.168.1.1:53".parse().unwrap(),
|
||||
crate::forward::Upstream::Udp("192.168.1.1:53".parse().unwrap()),
|
||||
)];
|
||||
let merged = merge_forwarding_rules(&config_rules, discovered).unwrap();
|
||||
assert_eq!(merged.len(), 2);
|
||||
|
||||
235
src/ctx.rs
235
src/ctx.rs
@@ -88,7 +88,7 @@ pub async fn resolve_query(
|
||||
src_addr: SocketAddr,
|
||||
ctx: &Arc<ServerCtx>,
|
||||
transport: Transport,
|
||||
) -> crate::Result<BytePacketBuffer> {
|
||||
) -> crate::Result<(BytePacketBuffer, QueryPath)> {
|
||||
let start = Instant::now();
|
||||
|
||||
let (qname, qtype) = match query.questions.first() {
|
||||
@@ -96,7 +96,8 @@ pub async fn resolve_query(
|
||||
None => return Err("empty question section".into()),
|
||||
};
|
||||
|
||||
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
|
||||
// Pipeline: overrides -> .localhost -> local zones -> special-use (unless forwarded)
|
||||
// -> .tld proxy -> blocklist -> cache -> forwarding -> recursive/upstream
|
||||
// Each lock is scoped to avoid holding MutexGuard across await points.
|
||||
let (response, path, dnssec) = {
|
||||
let override_record = ctx.overrides.read().unwrap().lookup(&qname);
|
||||
@@ -119,8 +120,10 @@ pub async fn resolve_query(
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers = records.clone();
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if is_special_use_domain(&qname) {
|
||||
// RFC 6761/8880: private PTR, DDR, NAT64 — answer locally
|
||||
} else if is_special_use_domain(&qname)
|
||||
&& crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules).is_none()
|
||||
{
|
||||
// RFC 6761/8880: answer locally unless a forwarding rule covers this zone.
|
||||
let resp = special_use_response(&query, &qname, qtype);
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if !ctx.proxy_tld_suffix.is_empty()
|
||||
@@ -187,13 +190,12 @@ pub async fn resolve_query(
|
||||
resp.header.authed_data = true;
|
||||
}
|
||||
(resp, QueryPath::Cached, cached_dnssec)
|
||||
} else if let Some(fwd_addr) =
|
||||
} else if let Some(upstream) =
|
||||
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
|
||||
{
|
||||
// Conditional forwarding takes priority over recursive mode
|
||||
// (e.g. Tailscale .ts.net, VPC private zones)
|
||||
let upstream = Upstream::Udp(fwd_addr);
|
||||
match forward_and_cache(raw_wire, &upstream, ctx, &qname, qtype).await {
|
||||
match forward_and_cache(raw_wire, upstream, ctx, &qname, qtype).await {
|
||||
Ok(resp) => (resp, QueryPath::Forwarded, DnssecStatus::Indeterminate),
|
||||
Err(e) => {
|
||||
error!(
|
||||
@@ -373,7 +375,7 @@ pub async fn resolve_query(
|
||||
dnssec,
|
||||
});
|
||||
|
||||
Ok(resp_buffer)
|
||||
Ok((resp_buffer, path))
|
||||
}
|
||||
|
||||
fn cache_and_parse(
|
||||
@@ -457,7 +459,7 @@ pub async fn handle_query(
|
||||
}
|
||||
};
|
||||
match resolve_query(query, &buffer.buf[..raw_len], src_addr, ctx, transport).await {
|
||||
Ok(resp_buffer) => {
|
||||
Ok((resp_buffer, _)) => {
|
||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -1036,4 +1038,219 @@ mod tests {
|
||||
"error message must be preserved for logging"
|
||||
);
|
||||
}
|
||||
|
||||
// ---- Full-pipeline resolve_query tests ----
|
||||
|
||||
/// Send a query through the full resolve_query pipeline and return
|
||||
/// the parsed response + query path.
|
||||
async fn resolve_in_test(
|
||||
ctx: &Arc<ServerCtx>,
|
||||
domain: &str,
|
||||
qtype: QueryType,
|
||||
) -> (DnsPacket, QueryPath) {
|
||||
let query = DnsPacket::query(0xBEEF, domain, qtype);
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
query.write(&mut buf).unwrap();
|
||||
let raw = &buf.buf[..buf.pos];
|
||||
let src: SocketAddr = "127.0.0.1:1234".parse().unwrap();
|
||||
|
||||
let (resp_buf, path) = resolve_query(query, raw, src, ctx, Transport::Udp)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut resp_parse_buf = BytePacketBuffer::from_bytes(resp_buf.filled());
|
||||
let resp = DnsPacket::from_buffer(&mut resp_parse_buf).unwrap();
|
||||
(resp, path)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn special_use_private_ptr_returns_nxdomain() {
|
||||
let ctx = Arc::new(crate::testutil::test_ctx().await);
|
||||
let (resp, path) =
|
||||
resolve_in_test(&ctx, "153.188.168.192.in-addr.arpa", QueryType::PTR).await;
|
||||
assert_eq!(path, QueryPath::Local);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NXDOMAIN);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn forwarding_rule_overrides_special_use_domain() {
|
||||
let mut resp = DnsPacket::new();
|
||||
resp.header.response = true;
|
||||
resp.header.rescode = ResultCode::NOERROR;
|
||||
let upstream_addr = crate::testutil::mock_upstream(resp).await;
|
||||
|
||||
let mut ctx = crate::testutil::test_ctx().await;
|
||||
ctx.forwarding_rules = vec![ForwardingRule::new(
|
||||
"168.192.in-addr.arpa".to_string(),
|
||||
Upstream::Udp(upstream_addr),
|
||||
)];
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) =
|
||||
resolve_in_test(&ctx, "153.188.168.192.in-addr.arpa", QueryType::PTR).await;
|
||||
|
||||
assert_eq!(
|
||||
path,
|
||||
QueryPath::Forwarded,
|
||||
"forwarding rule must take precedence over special-use NXDOMAIN"
|
||||
);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_override_takes_precedence() {
|
||||
let ctx = crate::testutil::test_ctx().await;
|
||||
ctx.overrides
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert("override.test", "1.2.3.4", 60, None)
|
||||
.unwrap();
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "override.test", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Overridden);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_localhost_resolves_to_loopback() {
|
||||
let ctx = Arc::new(crate::testutil::test_ctx().await);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "localhost", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Local);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::LOCALHOST),
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_localhost_subdomain_resolves_to_loopback() {
|
||||
let ctx = Arc::new(crate::testutil::test_ctx().await);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "app.localhost", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Local);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::LOCALHOST),
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_local_zone_returns_configured_record() {
|
||||
let mut ctx = crate::testutil::test_ctx().await;
|
||||
let mut inner = HashMap::new();
|
||||
inner.insert(
|
||||
QueryType::A,
|
||||
vec![DnsRecord::A {
|
||||
domain: "myapp.test".to_string(),
|
||||
addr: Ipv4Addr::new(10, 0, 0, 42),
|
||||
ttl: 300,
|
||||
}],
|
||||
);
|
||||
ctx.zone_map.insert("myapp.test".to_string(), inner);
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "myapp.test", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Local);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::new(10, 0, 0, 42)),
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_tld_proxy_resolves_service() {
|
||||
let ctx = crate::testutil::test_ctx().await;
|
||||
ctx.services.lock().unwrap().insert("grafana", 3000);
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "grafana.numa", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Local);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::LOCALHOST),
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_blocklist_sinkhole() {
|
||||
let ctx = crate::testutil::test_ctx().await;
|
||||
let mut domains = std::collections::HashSet::new();
|
||||
domains.insert("ads.tracker.test".to_string());
|
||||
ctx.blocklist.write().unwrap().swap_domains(domains, vec![]);
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "ads.tracker.test", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Blocked);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::UNSPECIFIED),
|
||||
other => panic!("expected sinkhole A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_cache_hit() {
|
||||
let ctx = Arc::new(crate::testutil::test_ctx().await);
|
||||
|
||||
// Pre-populate cache with a response
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.questions.push(crate::question::DnsQuestion {
|
||||
name: "cached.test".to_string(),
|
||||
qtype: QueryType::A,
|
||||
});
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "cached.test".to_string(),
|
||||
addr: Ipv4Addr::new(5, 5, 5, 5),
|
||||
ttl: 3600,
|
||||
});
|
||||
ctx.cache
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert("cached.test", QueryType::A, &pkt);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "cached.test", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Cached);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_forwarding_returns_upstream_answer() {
|
||||
let mut upstream_resp = DnsPacket::new();
|
||||
upstream_resp.header.response = true;
|
||||
upstream_resp.header.rescode = ResultCode::NOERROR;
|
||||
upstream_resp.answers.push(DnsRecord::A {
|
||||
domain: "internal.corp".to_string(),
|
||||
addr: Ipv4Addr::new(10, 1, 2, 3),
|
||||
ttl: 600,
|
||||
});
|
||||
let upstream_addr = crate::testutil::mock_upstream(upstream_resp).await;
|
||||
|
||||
let mut ctx = crate::testutil::test_ctx().await;
|
||||
ctx.forwarding_rules = vec![ForwardingRule::new(
|
||||
"corp".to_string(),
|
||||
Upstream::Udp(upstream_addr),
|
||||
)];
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let (resp, path) = resolve_in_test(&ctx, "internal.corp", QueryType::A).await;
|
||||
assert_eq!(path, QueryPath::Forwarded);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { domain, addr, .. } => {
|
||||
assert_eq!(domain, "internal.corp");
|
||||
assert_eq!(*addr, Ipv4Addr::new(10, 1, 2, 3));
|
||||
}
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ async fn resolve_doh(
|
||||
let questions = query.questions.clone();
|
||||
|
||||
match resolve_query(query, dns_bytes, src, ctx, Transport::Doh).await {
|
||||
Ok(resp_buffer) => {
|
||||
Ok((resp_buffer, _)) => {
|
||||
let min_ttl = extract_min_ttl(resp_buffer.filled());
|
||||
dns_response(resp_buffer.filled(), min_ttl)
|
||||
}
|
||||
|
||||
56
src/dot.rs
56
src/dot.rs
@@ -211,7 +211,7 @@ async fn handle_dot_connection<S>(
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(resp_buffer) => {
|
||||
Ok((resp_buffer, _)) => {
|
||||
if write_framed(&mut stream, resp_buffer.filled())
|
||||
.await
|
||||
.is_err()
|
||||
@@ -279,7 +279,7 @@ where
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
use std::sync::Mutex;
|
||||
|
||||
use rcgen::{CertificateParams, DnType, KeyPair};
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer, ServerName};
|
||||
@@ -344,13 +344,10 @@ mod tests {
|
||||
async fn spawn_dot_server() -> (SocketAddr, CertificateDer<'static>) {
|
||||
let (server_tls, cert_der) = test_tls_configs();
|
||||
|
||||
let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||
// Bind an unresponsive upstream and leak it so it lives for the test duration.
|
||||
let blackhole = Box::leak(Box::new(std::net::UdpSocket::bind("127.0.0.1:0").unwrap()));
|
||||
let upstream_addr = blackhole.local_addr().unwrap();
|
||||
let ctx = Arc::new(ServerCtx {
|
||||
socket,
|
||||
zone_map: {
|
||||
let upstream_addr = crate::testutil::blackhole_upstream();
|
||||
|
||||
let mut ctx = crate::testutil::test_ctx().await;
|
||||
ctx.zone_map = {
|
||||
let mut m = HashMap::new();
|
||||
let mut inner = HashMap::new();
|
||||
inner.insert(
|
||||
@@ -363,44 +360,13 @@ mod tests {
|
||||
);
|
||||
m.insert("dot-test.example".to_string(), inner);
|
||||
m
|
||||
},
|
||||
cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
|
||||
refreshing: Mutex::new(std::collections::HashSet::new()),
|
||||
stats: Mutex::new(crate::stats::ServerStats::new()),
|
||||
overrides: RwLock::new(crate::override_store::OverrideStore::new()),
|
||||
blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
|
||||
query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
|
||||
services: Mutex::new(crate::service_store::ServiceStore::new()),
|
||||
lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
|
||||
forwarding_rules: Vec::new(),
|
||||
upstream_pool: Mutex::new(crate::forward::UpstreamPool::new(
|
||||
};
|
||||
ctx.upstream_pool = Mutex::new(crate::forward::UpstreamPool::new(
|
||||
vec![crate::forward::Upstream::Udp(upstream_addr)],
|
||||
vec![],
|
||||
)),
|
||||
upstream_auto: false,
|
||||
upstream_port: 53,
|
||||
lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
|
||||
timeout: Duration::from_millis(200),
|
||||
hedge_delay: Duration::ZERO,
|
||||
proxy_tld: "numa".to_string(),
|
||||
proxy_tld_suffix: ".numa".to_string(),
|
||||
lan_enabled: false,
|
||||
config_path: String::new(),
|
||||
config_found: false,
|
||||
config_dir: std::path::PathBuf::from("/tmp"),
|
||||
data_dir: std::path::PathBuf::from("/tmp"),
|
||||
tls_config: Some(arc_swap::ArcSwap::from(server_tls)),
|
||||
upstream_mode: crate::config::UpstreamMode::Forward,
|
||||
root_hints: Vec::new(),
|
||||
srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
|
||||
inflight: Mutex::new(HashMap::new()),
|
||||
dnssec_enabled: false,
|
||||
dnssec_strict: false,
|
||||
health_meta: crate::health::HealthMeta::test_fixture(),
|
||||
ca_pem: None,
|
||||
mobile_enabled: false,
|
||||
mobile_port: 8765,
|
||||
});
|
||||
));
|
||||
ctx.tls_config = Some(arc_swap::ArcSwap::from(server_tls));
|
||||
let ctx = Arc::new(ctx);
|
||||
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let addr = listener.local_addr().unwrap();
|
||||
|
||||
@@ -36,6 +36,12 @@ impl PartialEq for Upstream {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Upstream {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Display::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Upstream {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
@@ -49,7 +55,10 @@ impl fmt::Display for Upstream {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_upstream_addr(s: &str, default_port: u16) -> std::result::Result<SocketAddr, String> {
|
||||
pub(crate) fn parse_upstream_addr(
|
||||
s: &str,
|
||||
default_port: u16,
|
||||
) -> std::result::Result<SocketAddr, String> {
|
||||
// Try full socket addr first: "1.2.3.4:5353" or "[::1]:5353"
|
||||
if let Ok(addr) = s.parse::<SocketAddr>() {
|
||||
return Ok(addr);
|
||||
|
||||
@@ -28,6 +28,9 @@ pub mod system_dns;
|
||||
pub mod tls;
|
||||
pub mod wire;
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod testutil;
|
||||
|
||||
pub type Error = Box<dyn std::error::Error + Send + Sync>;
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@ use std::net::SocketAddr;
|
||||
|
||||
use log::info;
|
||||
|
||||
use crate::forward::Upstream;
|
||||
|
||||
fn print_recursive_hint() {
|
||||
let is_recursive = crate::config::load_config("numa.toml")
|
||||
.map(|c| c.config.upstream.mode == crate::config::UpstreamMode::Recursive)
|
||||
@@ -22,11 +24,11 @@ fn is_loopback_or_stub(addr: &str) -> bool {
|
||||
pub struct ForwardingRule {
|
||||
pub suffix: String,
|
||||
dot_suffix: String, // pre-computed ".suffix" for zero-alloc matching
|
||||
pub upstream: SocketAddr,
|
||||
pub upstream: Upstream,
|
||||
}
|
||||
|
||||
impl ForwardingRule {
|
||||
pub fn new(suffix: String, upstream: SocketAddr) -> Self {
|
||||
pub fn new(suffix: String, upstream: Upstream) -> Self {
|
||||
let dot_suffix = format!(".{}", suffix);
|
||||
Self {
|
||||
suffix,
|
||||
@@ -233,7 +235,7 @@ fn discover_macos() -> SystemDnsInfo {
|
||||
#[cfg(any(target_os = "macos", target_os = "linux"))]
|
||||
fn make_rule(domain: &str, nameserver: &str) -> Option<ForwardingRule> {
|
||||
let addr = crate::forward::parse_upstream_addr(nameserver, 53).ok()?;
|
||||
Some(ForwardingRule::new(domain.to_string(), addr))
|
||||
Some(ForwardingRule::new(domain.to_string(), Upstream::Udp(addr)))
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
@@ -822,10 +824,13 @@ fn uninstall_windows() -> Result<(), String> {
|
||||
/// Find the upstream for a domain by checking forwarding rules.
|
||||
/// Returns None if no rule matches (use default upstream).
|
||||
/// Zero-allocation on the hot path — dot_suffix is pre-computed.
|
||||
pub fn match_forwarding_rule(domain: &str, rules: &[ForwardingRule]) -> Option<SocketAddr> {
|
||||
pub fn match_forwarding_rule<'a>(
|
||||
domain: &str,
|
||||
rules: &'a [ForwardingRule],
|
||||
) -> Option<&'a Upstream> {
|
||||
for rule in rules {
|
||||
if domain == rule.suffix || domain.ends_with(&rule.dot_suffix) {
|
||||
return Some(rule.upstream);
|
||||
return Some(&rule.upstream);
|
||||
}
|
||||
}
|
||||
None
|
||||
|
||||
95
src/testutil.rs
Normal file
95
src/testutil.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use crate::blocklist::BlocklistStore;
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::cache::DnsCache;
|
||||
use crate::config::UpstreamMode;
|
||||
use crate::ctx::ServerCtx;
|
||||
use crate::forward::{Upstream, UpstreamPool};
|
||||
use crate::health::HealthMeta;
|
||||
use crate::lan::PeerStore;
|
||||
use crate::override_store::OverrideStore;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::query_log::QueryLog;
|
||||
use crate::service_store::ServiceStore;
|
||||
use crate::srtt::SrttCache;
|
||||
use crate::stats::ServerStats;
|
||||
/// Minimal `ServerCtx` for tests. Override fields after construction
|
||||
/// (all fields are `pub`), then wrap in `Arc`.
|
||||
pub async fn test_ctx() -> ServerCtx {
|
||||
let socket = UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||
ServerCtx {
|
||||
socket,
|
||||
zone_map: HashMap::new(),
|
||||
cache: RwLock::new(DnsCache::new(100, 60, 86400)),
|
||||
refreshing: Mutex::new(HashSet::new()),
|
||||
stats: Mutex::new(ServerStats::new()),
|
||||
overrides: RwLock::new(OverrideStore::new()),
|
||||
blocklist: RwLock::new(BlocklistStore::new()),
|
||||
query_log: Mutex::new(QueryLog::new(100)),
|
||||
services: Mutex::new(ServiceStore::new()),
|
||||
lan_peers: Mutex::new(PeerStore::new(90)),
|
||||
forwarding_rules: Vec::new(),
|
||||
upstream_pool: Mutex::new(UpstreamPool::new(
|
||||
vec![Upstream::Udp("127.0.0.1:53".parse().unwrap())],
|
||||
vec![],
|
||||
)),
|
||||
upstream_auto: false,
|
||||
upstream_port: 53,
|
||||
lan_ip: Mutex::new(Ipv4Addr::LOCALHOST),
|
||||
timeout: Duration::from_millis(200),
|
||||
hedge_delay: Duration::ZERO,
|
||||
proxy_tld: "numa".to_string(),
|
||||
proxy_tld_suffix: ".numa".to_string(),
|
||||
lan_enabled: false,
|
||||
config_path: "/tmp/test-numa.toml".to_string(),
|
||||
config_found: false,
|
||||
config_dir: PathBuf::from("/tmp"),
|
||||
data_dir: PathBuf::from("/tmp"),
|
||||
tls_config: None,
|
||||
upstream_mode: UpstreamMode::Forward,
|
||||
root_hints: Vec::new(),
|
||||
srtt: RwLock::new(SrttCache::new(true)),
|
||||
inflight: Mutex::new(HashMap::new()),
|
||||
dnssec_enabled: false,
|
||||
dnssec_strict: false,
|
||||
health_meta: HealthMeta::test_fixture(),
|
||||
ca_pem: None,
|
||||
mobile_enabled: false,
|
||||
mobile_port: 8765,
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a UDP socket that replies to the first DNS query with the given
|
||||
/// response packet (patching the query ID to match). Returns the socket address.
|
||||
pub async fn mock_upstream(response: DnsPacket) -> SocketAddr {
|
||||
let sock = UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||
let addr = sock.local_addr().unwrap();
|
||||
tokio::spawn(async move {
|
||||
let mut buf = [0u8; 512];
|
||||
let (_, src) = sock.recv_from(&mut buf).await.unwrap();
|
||||
let query_id = u16::from_be_bytes([buf[0], buf[1]]);
|
||||
let mut resp = response;
|
||||
resp.header.id = query_id;
|
||||
let mut out = BytePacketBuffer::new();
|
||||
resp.write(&mut out).unwrap();
|
||||
sock.send_to(out.filled(), src).await.unwrap();
|
||||
});
|
||||
addr
|
||||
}
|
||||
|
||||
/// UDP socket that accepts connections but never replies.
|
||||
/// Useful as an upstream that triggers timeouts.
|
||||
pub fn blackhole_upstream() -> SocketAddr {
|
||||
let sock = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let addr = sock.local_addr().unwrap();
|
||||
// Leak so it stays bound for the duration of the test process.
|
||||
Box::leak(Box::new(sock));
|
||||
addr
|
||||
}
|
||||
Reference in New Issue
Block a user