1 Commits

Author SHA1 Message Date
Eduard Ghenea
6fc59a7bd5 fix: replace truecolor ANSI codes with 256-color for consistent terminal display
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-17 12:05:22 +03:00
30 changed files with 256 additions and 3536 deletions

View File

@@ -87,26 +87,12 @@ jobs:
sleep 2
curl -sf http://127.0.0.1:5380/health
dig @127.0.0.1 example.com +short +timeout=5 | grep -q '.'
user=$(ps -o user= -p "$(systemctl show -p MainPID --value numa)" | tr -d ' ')
echo "numa running as: $user"
test "$user" != "root"
sudo ./target/release/numa install
sleep 2
curl -sf http://127.0.0.1:5380/health
sudo ./target/release/numa uninstall
sleep 1
! curl -sf http://127.0.0.1:5380/health 2>/dev/null
- name: diagnostics on failure
if: failure()
run: |
echo "=== systemctl status numa ==="
sudo systemctl status numa --no-pager -l || true
echo "=== journalctl -u numa (last 200) ==="
sudo journalctl -u numa --no-pager -n 200 || true
echo "=== ss -tulnp on 53/80/443/853/5380 ==="
sudo ss -tulnp 2>/dev/null | grep -E ':(53|80|443|853|5380)\b' || true
echo "=== systemctl is-active systemd-resolved ==="
systemctl is-active systemd-resolved || true
- name: cleanup
if: always()
run: |

1
.gitignore vendored
View File

@@ -6,3 +6,4 @@ site/blog/posts/
ios/
drafts/
site/blog/index.html
.DS_Store

376
Cargo.lock generated
View File

@@ -8,41 +8,6 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "aead"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
dependencies = [
"crypto-common",
"generic-array",
]
[[package]]
name = "aes"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
"cfg-if",
"cipher",
"cpufeatures",
]
[[package]]
name = "aes-gcm"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
dependencies = [
"aead",
"aes",
"cipher",
"ctr",
"ghash",
"subtle",
]
[[package]]
name = "aho-corasick"
version = "1.1.4"
@@ -144,7 +109,7 @@ dependencies = [
"nom",
"num-traits",
"rusticata-macros",
"thiserror 2.0.18",
"thiserror",
"time",
]
@@ -292,15 +257,6 @@ version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
[[package]]
name = "block-buffer"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
[[package]]
name = "bumpalo"
version = "3.20.2"
@@ -343,30 +299,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chacha20"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
dependencies = [
"cfg-if",
"cipher",
"cpufeatures",
]
[[package]]
name = "chacha20poly1305"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35"
dependencies = [
"aead",
"chacha20",
"cipher",
"poly1305",
"zeroize",
]
[[package]]
name = "ciborium"
version = "0.2.2"
@@ -394,17 +326,6 @@ dependencies = [
"half",
]
[[package]]
name = "cipher"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
dependencies = [
"crypto-common",
"inout",
"zeroize",
]
[[package]]
name = "clap"
version = "4.6.0"
@@ -462,15 +383,6 @@ version = "0.4.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d"
[[package]]
name = "cpufeatures"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
dependencies = [
"libc",
]
[[package]]
name = "crc32fast"
version = "1.5.0"
@@ -561,51 +473,6 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
[[package]]
name = "crypto-common"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
dependencies = [
"generic-array",
"rand_core 0.6.4",
"typenum",
]
[[package]]
name = "ctr"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
dependencies = [
"cipher",
]
[[package]]
name = "curve25519-dalek"
version = "4.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
"cfg-if",
"cpufeatures",
"curve25519-dalek-derive",
"fiat-crypto",
"rustc_version",
"subtle",
]
[[package]]
name = "curve25519-dalek-derive"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "data-encoding"
version = "2.10.0"
@@ -635,17 +502,6 @@ dependencies = [
"powerfmt",
]
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
name = "displaydoc"
version = "0.2.5"
@@ -720,12 +576,6 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "fiat-crypto"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "find-msvc-tools"
version = "0.1.9"
@@ -857,16 +707,6 @@ dependencies = [
"slab",
]
[[package]]
name = "generic-array"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.17"
@@ -907,16 +747,6 @@ dependencies = [
"wasip3",
]
[[package]]
name = "ghash"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
dependencies = [
"opaque-debug",
"polyval",
]
[[package]]
name = "h2"
version = "0.4.13"
@@ -990,7 +820,7 @@ dependencies = [
"rand",
"ring",
"rustls",
"thiserror 2.0.18",
"thiserror",
"tinyvec",
"tokio",
"tokio-rustls",
@@ -1016,51 +846,13 @@ dependencies = [
"resolv-conf",
"rustls",
"smallvec",
"thiserror 2.0.18",
"thiserror",
"tokio",
"tokio-rustls",
"tracing",
"webpki-roots 0.26.11",
]
[[package]]
name = "hkdf"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7"
dependencies = [
"hmac",
]
[[package]]
name = "hmac"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
dependencies = [
"digest",
]
[[package]]
name = "hpke"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f65d16b699dd1a1fa2d851c970b0c971b388eeeb40f744252b8de48860980c8f"
dependencies = [
"aead",
"aes-gcm",
"chacha20poly1305",
"digest",
"generic-array",
"hkdf",
"hmac",
"rand_core 0.9.5",
"sha2",
"subtle",
"x25519-dalek",
"zeroize",
]
[[package]]
name = "http"
version = "1.4.0"
@@ -1289,15 +1081,6 @@ dependencies = [
"serde_core",
]
[[package]]
name = "inout"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01"
dependencies = [
"generic-array",
]
[[package]]
name = "ipconfig"
version = "0.3.4"
@@ -1547,7 +1330,7 @@ dependencies = [
[[package]]
name = "numa"
version = "0.14.0"
version = "0.13.1"
dependencies = [
"arc-swap",
"axum",
@@ -1561,9 +1344,7 @@ dependencies = [
"hyper",
"hyper-util",
"log",
"odoh-rs",
"qrcode",
"rand_core 0.9.5",
"rcgen",
"reqwest",
"ring",
@@ -1582,19 +1363,6 @@ dependencies = [
"x509-parser",
]
[[package]]
name = "odoh-rs"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbb89720b7dfdddc89bc7560669d41a0bb68eb64784a4aebd293308a489f3837"
dependencies = [
"aes-gcm",
"bytes",
"hkdf",
"hpke",
"thiserror 1.0.69",
]
[[package]]
name = "oid-registry"
version = "0.8.1"
@@ -1626,12 +1394,6 @@ version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "opaque-debug"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
[[package]]
name = "page_size"
version = "0.6.0"
@@ -1721,29 +1483,6 @@ dependencies = [
"plotters-backend",
]
[[package]]
name = "poly1305"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
dependencies = [
"cpufeatures",
"opaque-debug",
"universal-hash",
]
[[package]]
name = "polyval"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
dependencies = [
"cfg-if",
"cpufeatures",
"opaque-debug",
"universal-hash",
]
[[package]]
name = "portable-atomic"
version = "1.13.1"
@@ -1822,7 +1561,7 @@ dependencies = [
"rustc-hash",
"rustls",
"socket2",
"thiserror 2.0.18",
"thiserror",
"tokio",
"tracing",
"web-time",
@@ -1843,7 +1582,7 @@ dependencies = [
"rustls",
"rustls-pki-types",
"slab",
"thiserror 2.0.18",
"thiserror",
"tinyvec",
"tracing",
"web-time",
@@ -1891,7 +1630,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha",
"rand_core 0.9.5",
"rand_core",
]
[[package]]
@@ -1901,16 +1640,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core 0.9.5",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.17",
"rand_core",
]
[[package]]
@@ -2059,15 +1789,6 @@ version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustc_version"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [
"semver",
]
[[package]]
name = "rusticata-macros"
version = "4.1.0"
@@ -2232,17 +1953,6 @@ dependencies = [
"serde",
]
[[package]]
name = "sha2"
version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "shlex"
version = "1.3.0"
@@ -2336,33 +2046,13 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl 1.0.69",
]
[[package]]
name = "thiserror"
version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
dependencies = [
"thiserror-impl 2.0.18",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn",
"thiserror-impl",
]
[[package]]
@@ -2608,12 +2298,6 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "typenum"
version = "1.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de"
[[package]]
name = "unicode-ident"
version = "1.0.24"
@@ -2626,16 +2310,6 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "universal-hash"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
dependencies = [
"crypto-common",
"subtle",
]
[[package]]
name = "untrusted"
version = "0.9.0"
@@ -2677,12 +2351,6 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
@@ -3192,16 +2860,6 @@ version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
[[package]]
name = "x25519-dalek"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277"
dependencies = [
"curve25519-dalek",
"rand_core 0.6.4",
]
[[package]]
name = "x509-parser"
version = "0.18.1"
@@ -3216,7 +2874,7 @@ dependencies = [
"oid-registry",
"ring",
"rusticata-macros",
"thiserror 2.0.18",
"thiserror",
"time",
]
@@ -3298,20 +2956,6 @@ name = "zeroize"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
dependencies = [
"zeroize_derive",
]
[[package]]
name = "zeroize_derive"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "zerotrie"

View File

@@ -1,6 +1,6 @@
[package]
name = "numa"
version = "0.14.0"
version = "0.13.1"
authors = ["razvandimescu <razvan@dimescu.com>"]
edition = "2021"
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
@@ -29,10 +29,6 @@ rustls = "0.23"
tokio-rustls = "0.26"
arc-swap = "1"
ring = "0.17"
odoh-rs = "1"
# rand_core 0.9 matches the version odoh-rs (via hpke 0.13) depends on, so we
# share one RngCore trait and OsRng impl across the dep tree.
rand_core = { version = "0.9", features = ["os_rng"] }
rustls-pemfile = "2.2.0"
qrcode = { version = "0.14", default-features = false, features = ["svg"] }
webpki-roots = "1"

View File

@@ -8,39 +8,6 @@ Type=simple
ExecStart={{exe_path}}
Restart=always
RestartSec=2
# Transient system user per start; no PKGBUILD/sysusers setup required.
# systemd remaps the StateDirectory ownership to the dynamic UID on each
# launch, including legacy root-owned trees from pre-drop installs.
DynamicUser=yes
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
StateDirectory=numa
StateDirectoryMode=0750
ConfigurationDirectory=numa
ConfigurationDirectoryMode=0755
# Sandboxing — conservative set known to work with Rust network daemons.
# Aggressive hardening (MemoryDenyWriteExecute, SystemCallFilter, seccomp
# allow-lists) can be layered on once tested in isolation.
NoNewPrivileges=true
ProtectSystem=strict
# DynamicUser= sets ProtectHome=read-only by default — leaves /home
# readable so systemd can exec binaries installed under it (cargo install,
# source builds), while blocking writes to user $HOMEs. Don't set =yes:
# that hides /home entirely and fails with status=203/EXEC.
PrivateTmp=true
PrivateDevices=true
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
RestrictRealtime=true
RestrictSUIDSGID=true
# AF_NETLINK for interface enumeration on network changes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX AF_NETLINK
StandardOutput=journal
StandardError=journal
SyslogIdentifier=numa

View File

@@ -8,16 +8,6 @@ api_port = 5380
# %PROGRAMDATA%\numa on windows. Override for
# containerized deploys or tests that can't
# write to the system path.
# filter_aaaa = true # on IPv4-only networks, answer AAAA queries with
# NODATA (NOERROR + empty answer) so Happy Eyeballs
# clients don't wait on a v6 attempt that can't
# succeed. Also strips `ipv6hint` from HTTPS/SVCB
# records (RFC 9460) so modern browsers (Chrome
# ≥103, Firefox, Safari) don't bypass the AAAA
# filter via SVCB hints. Local zones, overrides,
# and the .numa proxy are NOT filtered — you can
# still configure v6 records for local services.
# Default: false.
# [upstream]
# mode = "forward" # "forward" (default) — relay to upstream
@@ -76,13 +66,6 @@ api_port = 5380
# [[forwarding]] # DoH upstream: full https:// URL
# suffix = "example.corp"
# upstream = "https://dns.quad9.net/dns-query"
#
# [[forwarding]] # array of upstreams → SRTT-aware failover
# suffix = ["google.com", "goog"] # fastest-healthy first, dead one skipped
# upstream = [
# "tls://9.9.9.9#dns.quad9.net",
# "tls://149.112.112.112#dns.quad9.net",
# ]
# [blocking]
# enabled = true # set to false to disable ad blocking

View File

@@ -1,15 +0,0 @@
odoh-relay.example.com {
handle /relay {
reverse_proxy numa-relay:8443
}
handle /health {
reverse_proxy numa-relay:8443
}
respond 404
# Per-request access logs defeat the point of an oblivious relay.
# Aggregate counters are exposed at /health on the relay itself.
log {
output discard
}
}

View File

@@ -1,48 +0,0 @@
# Numa ODoH Relay — Docker deploy
Two-container deploy: Caddy terminates TLS (auto-provisioning a Let's Encrypt
cert via ACME) and reverse-proxies to a Numa relay running on an internal
Docker network. The relay never reads sealed payloads; Caddy never logs them.
## Prerequisites
- A host with ports 80 and 443 publicly reachable from the internet.
- A DNS record (`A` or `AAAA`) pointing your chosen hostname at the host.
- Docker + Docker Compose v2.
## Configure
Edit `Caddyfile` and replace `odoh-relay.example.com` with your hostname.
That hostname is what ACME validates against and what ODoH clients will
configure as their relay URL: `https://<hostname>/relay`.
## Deploy
```sh
docker compose up -d
docker compose logs -f caddy # watch ACME provisioning
```
First boot takes a few seconds while Caddy obtains the cert. Subsequent
restarts reuse the cached cert from the `caddy_data` volume.
## Verify
```sh
curl https://<hostname>/health
# ok
# total 0
# forwarded_ok 0
# forwarded_err 0
# rejected_bad_request 0
```
Then point any ODoH client at `https://<hostname>/relay` and watch the
counters tick.
## Listing on the public ecosystem
DNSCrypt's [v3/odoh-relays.md](https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v3/odoh-relays.md)
is the canonical list. As of the pruned 2025-09-16 commit, only one public ODoH
relay remained on that list — running this compose file doubles the global supply.
Open a PR there once your relay has been up for ~24 hours.

View File

@@ -1,26 +0,0 @@
services:
numa-relay:
image: ghcr.io/razvandimescu/numa:latest
command: ["relay", "8443", "0.0.0.0"]
restart: unless-stopped
networks: [internal]
caddy:
image: caddy:2
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
restart: unless-stopped
depends_on: [numa-relay]
networks: [internal]
networks:
internal:
volumes:
caddy_data:
caddy_config:

View File

@@ -228,7 +228,6 @@ body {
.path-bar-fill.tcp { background: var(--violet); }
.path-bar-fill.dot { background: var(--emerald); }
.path-bar-fill.doh { background: var(--teal); }
.path-bar-fill.odoh { background: var(--violet-dim); }
.path-pct {
font-family: var(--font-mono);
font-size: 0.75rem;
@@ -638,26 +637,16 @@ body {
</div>
</div>
<!-- Inbound wire (apps → numa) -->
<!-- Transport breakdown -->
<div class="panel">
<div class="panel-header">
<span class="panel-title">Inbound Wire <span style="color: var(--text-dim); font-weight: normal;">apps → numa</span></span>
<span class="panel-title">Transport</span>
<span class="panel-title" id="transportEncrypted" style="color: var(--text-dim)"></span>
</div>
<div class="panel-body" id="transportBars">
</div>
</div>
<!-- Outbound wire (numa → internet) -->
<div class="panel">
<div class="panel-header">
<span class="panel-title">Outbound Wire <span style="color: var(--text-dim); font-weight: normal;">numa → internet</span></span>
<span class="panel-title" id="upstreamWireEncrypted" style="color: var(--text-dim)"></span>
</div>
<div class="panel-body" id="upstreamWireBars">
</div>
</div>
<!-- Main grid: query log + sidebar -->
<div class="main-grid">
<!-- Query log -->
@@ -971,11 +960,9 @@ function renderBarChart(containerId, defs, data, total) {
}).join('');
}
function encryptionPct(data, encryptedKeys, allKeys) {
const total = allKeys.reduce((s, k) => s + (data[k] || 0), 0);
if (total === 0) return 0;
const encrypted = encryptedKeys.reduce((s, k) => s + (data[k] || 0), 0);
return Math.round((encrypted / total) * 100);
function encryptionPct(transport) {
const total = (transport.udp + transport.tcp + transport.dot + transport.doh) || 1;
return (((transport.dot + transport.doh) / total) * 100).toFixed(0);
}
const PATH_DEFS = [
@@ -1003,25 +990,9 @@ const TRANSPORT_DEFS = [
function renderTransport(transport) {
const total = (transport.udp + transport.tcp + transport.dot + transport.doh) || 1;
renderBarChart('transportBars', TRANSPORT_DEFS, transport, total);
const encPct = encryptionPct(transport, ['dot', 'doh'], ['udp', 'tcp', 'dot', 'doh']);
const encPct = encryptionPct(transport);
const el = document.getElementById('transportEncrypted');
el.textContent = `${encPct}% encrypted inbound`;
el.style.color = encPct >= 80 ? 'var(--emerald)' : encPct >= 50 ? 'var(--amber)' : 'var(--rose)';
}
const UPSTREAM_WIRE_DEFS = [
{ key: 'udp', label: 'UDP', cls: 'udp' },
{ key: 'doh', label: 'DoH', cls: 'doh' },
{ key: 'dot', label: 'DoT', cls: 'dot' },
{ key: 'odoh', label: 'ODoH', cls: 'odoh' },
];
function renderUpstreamWire(ut) {
const total = (ut.udp + ut.doh + ut.dot + ut.odoh) || 0;
renderBarChart('upstreamWireBars', UPSTREAM_WIRE_DEFS, ut, total || 1);
const encPct = encryptionPct(ut, ['doh', 'dot', 'odoh'], ['udp', 'doh', 'dot', 'odoh']);
const el = document.getElementById('upstreamWireEncrypted');
el.textContent = total > 0 ? `${encPct}% encrypted outbound` : '';
el.textContent = `${encPct}% encrypted`;
el.style.color = encPct >= 80 ? 'var(--emerald)' : encPct >= 50 ? 'var(--amber)' : 'var(--rose)';
}
@@ -1263,7 +1234,6 @@ async function refresh() {
// Panels
renderPaths(q);
renderTransport(stats.transport);
renderUpstreamWire(stats.upstream_transport || { udp: 0, doh: 0, dot: 0, odoh: 0 });
renderQueryLog(logs);
renderOverrides(overrides);
renderCache(cache);

View File

@@ -170,7 +170,6 @@ struct StatsResponse {
srtt: bool,
queries: QueriesStats,
transport: TransportStats,
upstream_transport: UpstreamTransportStats,
cache: CacheStats,
overrides: OverrideStats,
blocking: BlockingStatsResponse,
@@ -187,14 +186,6 @@ struct TransportStats {
doh: u64,
}
#[derive(Serialize)]
struct UpstreamTransportStats {
udp: u64,
doh: u64,
dot: u64,
odoh: u64,
}
#[derive(Serialize)]
struct MobileStatsResponse {
enabled: bool,
@@ -575,12 +566,6 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
dot: snap.transport_dot,
doh: snap.transport_doh,
},
upstream_transport: UpstreamTransportStats {
udp: snap.upstream_transport_udp,
doh: snap.upstream_transport_doh,
dot: snap.upstream_transport_dot,
odoh: snap.upstream_transport_odoh,
},
cache: CacheStats {
entries: cache_len,
max_entries: cache_max,

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::path::{Path, PathBuf};
use std::time::Duration;
use serde::Deserialize;
@@ -41,30 +41,17 @@ pub struct Config {
pub struct ForwardingRuleConfig {
#[serde(deserialize_with = "string_or_vec")]
pub suffix: Vec<String>,
#[serde(deserialize_with = "string_or_vec")]
pub upstream: Vec<String>,
pub upstream: String,
}
impl ForwardingRuleConfig {
fn to_runtime_rules(&self) -> Result<Vec<crate::system_dns::ForwardingRule>> {
if self.upstream.is_empty() {
return Err(format!(
"forwarding rule for suffix {:?}: upstream must not be empty",
self.suffix
)
.into());
}
let mut primary = Vec::with_capacity(self.upstream.len());
for s in &self.upstream {
let u = crate::forward::parse_upstream(s, 53)
.map_err(|e| format!("forwarding rule for upstream '{}': {}", s, e))?;
primary.push(u);
}
let pool = crate::forward::UpstreamPool::new(primary, vec![]);
let upstream = crate::forward::parse_upstream(&self.upstream, 53)
.map_err(|e| format!("forwarding rule for upstream '{}': {}", self.upstream, e))?;
Ok(self
.suffix
.iter()
.map(|s| crate::system_dns::ForwardingRule::new(s.clone(), pool.clone()))
.map(|s| crate::system_dns::ForwardingRule::new(s.clone(), upstream.clone()))
.collect())
}
}
@@ -93,12 +80,6 @@ pub struct ServerConfig {
/// Defaults to `crate::data_dir()` (platform-specific system path) if unset.
#[serde(default)]
pub data_dir: Option<PathBuf>,
/// Synthesize NODATA (NOERROR + empty answer) for AAAA queries, and
/// strip `ipv6hint` from HTTPS/SVCB responses (RFC 9460). For IPv4-only
/// networks where Happy Eyeballs fallback adds latency. Local zones,
/// overrides, and the service proxy are not affected. Default false.
#[serde(default)]
pub filter_aaaa: bool,
}
impl Default for ServerConfig {
@@ -108,7 +89,6 @@ impl Default for ServerConfig {
api_port: default_api_port(),
api_bind_addr: default_api_bind_addr(),
data_dir: None,
filter_aaaa: false,
}
}
}
@@ -134,7 +114,6 @@ pub enum UpstreamMode {
#[default]
Forward,
Recursive,
Odoh,
}
impl UpstreamMode {
@@ -143,20 +122,6 @@ impl UpstreamMode {
UpstreamMode::Auto => "auto",
UpstreamMode::Forward => "forward",
UpstreamMode::Recursive => "recursive",
UpstreamMode::Odoh => "odoh",
}
}
/// Hedging duplicates the in-flight query against the same upstream to
/// rescue tail latency. Beneficial for UDP/DoH/DoT (cheap retransmit /
/// h2 stream multiplexing). For ODoH it doubles the relay's HPKE
/// seal/unseal load and the sealed-byte footprint a passive observer
/// can correlate, with no latency win — the relay hop dominates either
/// way. Force-zero in oblivious mode regardless of `hedge_ms`.
pub fn hedge_delay(self, hedge_ms: u64) -> Duration {
match self {
UpstreamMode::Odoh => Duration::ZERO,
_ => Duration::from_millis(hedge_ms),
}
}
}
@@ -169,7 +134,7 @@ pub struct UpstreamConfig {
pub address: Vec<String>,
#[serde(default = "default_upstream_port")]
pub port: u16,
#[serde(default, deserialize_with = "string_or_vec")]
#[serde(default)]
pub fallback: Vec<String>,
#[serde(default = "default_timeout_ms")]
pub timeout_ms: u64,
@@ -181,30 +146,6 @@ pub struct UpstreamConfig {
pub prime_tlds: Vec<String>,
#[serde(default = "default_srtt")]
pub srtt: bool,
/// Only used when `mode = "odoh"`. Full https:// URL of the relay
/// endpoint (including path, e.g. `https://odoh-relay.numa.rs/relay`).
#[serde(default)]
pub relay: Option<String>,
/// Only used when `mode = "odoh"`. Full https:// URL of the target
/// resolver (`https://odoh.cloudflare-dns.com/dns-query`).
#[serde(default)]
pub target: Option<String>,
/// Only used when `mode = "odoh"`. When true (the default), relay failure
/// returns SERVFAIL instead of downgrading to the `fallback` upstream —
/// a user who configured ODoH rarely wants a silent non-oblivious path.
#[serde(default)]
pub strict: Option<bool>,
/// Bootstrap IP for the relay host, used when numa is its own system
/// resolver (otherwise the ODoH HTTPS client loops resolving through
/// itself). TLS still validates the cert against `relay`'s hostname.
#[serde(default)]
pub relay_ip: Option<IpAddr>,
/// Same as `relay_ip` but for the target host.
#[serde(default)]
pub target_ip: Option<IpAddr>,
}
impl Default for UpstreamConfig {
@@ -219,90 +160,10 @@ impl Default for UpstreamConfig {
root_hints: default_root_hints(),
prime_tlds: default_prime_tlds(),
srtt: default_srtt(),
relay: None,
target: None,
strict: None,
relay_ip: None,
target_ip: None,
}
}
}
/// Parsed ODoH config fields. `mode = "odoh"` requires both URLs to be
/// present, to parse as `https://`, and to resolve to distinct hosts.
#[derive(Debug)]
pub struct OdohUpstream {
pub relay_url: String,
pub relay_host: String,
pub target_host: String,
pub target_path: String,
pub strict: bool,
pub relay_bootstrap: Option<SocketAddr>,
pub target_bootstrap: Option<SocketAddr>,
}
impl UpstreamConfig {
/// Validate and extract ODoH-specific fields. Called during `load_config`
/// so misconfigured ODoH fails fast at startup, the same care we take
/// with the DNSSEC strict boot check.
pub fn odoh_upstream(&self) -> Result<OdohUpstream> {
let relay = self
.relay
.as_deref()
.ok_or("mode = \"odoh\" requires upstream.relay")?;
let target = self
.target
.as_deref()
.ok_or("mode = \"odoh\" requires upstream.target")?;
let relay_url = reqwest::Url::parse(relay)
.map_err(|e| format!("upstream.relay invalid URL '{}': {}", relay, e))?;
let target_url = reqwest::Url::parse(target)
.map_err(|e| format!("upstream.target invalid URL '{}': {}", target, e))?;
if relay_url.scheme() != "https" || target_url.scheme() != "https" {
return Err("upstream.relay and upstream.target must both use https://".into());
}
if relay_url.host_str().is_none() || target_url.host_str().is_none() {
return Err("upstream.relay and upstream.target must include a host".into());
}
if relay_url.host_str() == target_url.host_str() {
return Err(format!(
"upstream.relay and upstream.target resolve to the same host ({}); the privacy property requires distinct operators",
relay_url.host_str().unwrap_or("?")
)
.into());
}
let relay_host = relay_url
.host_str()
.ok_or("upstream.relay has no host")?
.to_string();
let target_host = target_url
.host_str()
.ok_or("upstream.target has no host")?
.to_string();
let target_path = if target_url.path().is_empty() {
"/".to_string()
} else {
target_url.path().to_string()
};
let relay_port = relay_url.port_or_known_default().unwrap_or(443);
let target_port = target_url.port_or_known_default().unwrap_or(443);
Ok(OdohUpstream {
relay_url: relay.to_string(),
relay_host,
target_host,
target_path,
strict: self.strict.unwrap_or(true),
relay_bootstrap: self.relay_ip.map(|ip| SocketAddr::new(ip, relay_port)),
target_bootstrap: self.target_ip.map(|ip| SocketAddr::new(ip, target_port)),
})
}
}
fn string_or_vec<'de, D>(deserializer: D) -> std::result::Result<Vec<String>, D::Error>
where
D: serde::Deserializer<'de>,
@@ -706,17 +567,6 @@ mod tests {
assert!(config.lan.enabled);
}
#[test]
fn filter_aaaa_defaults_false() {
    // A default server config must leave AAAA filtering switched off.
    let defaults = ServerConfig::default();
    assert!(!defaults.filter_aaaa);
}
#[test]
fn filter_aaaa_parses_from_server_section() {
    // `filter_aaaa = true` under [server] must round-trip into the struct.
    let parsed = toml::from_str::<Config>("[server]\nfilter_aaaa = true").unwrap();
    assert!(parsed.server.filter_aaaa);
}
#[test]
fn custom_bind_addrs_parse() {
let toml = r#"
@@ -762,22 +612,12 @@ mod tests {
}
#[test]
fn fallback_array_parses() {
fn fallback_parses() {
let config: Config =
toml::from_str("[upstream]\nfallback = [\"8.8.8.8\", \"1.1.1.1\"]").unwrap();
assert_eq!(config.upstream.fallback, vec!["8.8.8.8", "1.1.1.1"]);
}
#[test]
fn fallback_string_parses_as_singleton_vec() {
let config: Config =
toml::from_str("[upstream]\nfallback = \"tls://1.1.1.1#cloudflare-dns.com\"").unwrap();
assert_eq!(
config.upstream.fallback,
vec!["tls://1.1.1.1#cloudflare-dns.com"]
);
}
#[test]
fn empty_address_gives_empty_vec() {
let config: Config = toml::from_str("").unwrap();
@@ -785,169 +625,6 @@ mod tests {
assert!(config.upstream.fallback.is_empty());
}
// ── [upstream] mode = "odoh" ────────────────────────────────────────
#[test]
fn odoh_config_parses_and_validates() {
    // Happy path: a well-formed [upstream] odoh section parses and the
    // extracted OdohUpstream carries the expected fields.
    let parsed: Config = toml::from_str(
        r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
"#,
    )
    .unwrap();
    assert!(matches!(parsed.upstream.mode, UpstreamMode::Odoh));
    let extracted = parsed.upstream.odoh_upstream().unwrap();
    assert_eq!(extracted.relay_url, "https://odoh-relay.numa.rs/relay");
    assert_eq!(extracted.target_host, "odoh.cloudflare-dns.com");
    assert_eq!(extracted.target_path, "/dns-query");
    assert!(extracted.strict, "strict defaults to true under mode=odoh");
}
// An explicit `strict = false` must flow through to the extracted value
// (i.e. the `unwrap_or(true)` default does not override it).
#[test]
fn odoh_strict_false_is_honoured() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
strict = false
"#;
let config: Config = toml::from_str(toml).unwrap();
assert!(!config.upstream.odoh_upstream().unwrap().strict);
}
#[test]
fn odoh_rejects_same_host_relay_and_target() {
    // Relay and target sharing one hostname defeats the ODoH privacy split,
    // so validation must refuse the pair with a "same host" error.
    let raw = r#"
[upstream]
mode = "odoh"
relay = "https://odoh.example.com/relay"
target = "https://odoh.example.com/dns-query"
"#;
    let parsed: Config = toml::from_str(raw).unwrap();
    let msg = parsed.upstream.odoh_upstream().unwrap_err().to_string();
    assert!(msg.contains("same host"), "got: {msg}");
}
// A plain-http relay (or target) must be rejected; the error mentions https.
#[test]
fn odoh_rejects_non_https() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "http://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
"#;
let config: Config = toml::from_str(toml).unwrap();
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
assert!(err.contains("https"), "got: {err}");
}
// Omitting upstream.relay under mode = "odoh" is an error naming the field.
#[test]
fn odoh_missing_relay_rejected() {
let toml = r#"
[upstream]
mode = "odoh"
target = "https://odoh.cloudflare-dns.com/dns-query"
"#;
let config: Config = toml::from_str(toml).unwrap();
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
assert!(err.contains("upstream.relay"), "got: {err}");
}
#[test]
fn odoh_bootstrap_ips_parse_into_socket_addrs() {
    // relay_ip / target_ip pin the HTTPS endpoints; with no explicit URL
    // port they combine with the https default (443) into socket addrs.
    let parsed: Config = toml::from_str(
        r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
relay_ip = "178.104.229.30"
target_ip = "104.16.249.249"
"#,
    )
    .unwrap();
    let extracted = parsed.upstream.odoh_upstream().unwrap();
    assert_eq!(extracted.relay_host, "odoh-relay.numa.rs");
    let relay_sock = extracted.relay_bootstrap.unwrap();
    let target_sock = extracted.target_bootstrap.unwrap();
    assert_eq!(relay_sock.to_string(), "178.104.229.30:443");
    assert_eq!(target_sock.to_string(), "104.16.249.249:443");
}
// Bootstrap IPs are optional: with no relay_ip/target_ip both stay None.
#[test]
fn odoh_bootstrap_ips_optional() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
"#;
let config: Config = toml::from_str(toml).unwrap();
let odoh = config.upstream.odoh_upstream().unwrap();
assert!(odoh.relay_bootstrap.is_none());
assert!(odoh.target_bootstrap.is_none());
}
// A malformed relay_ip fails already at TOML deserialization time, and the
// deserializer's error message names the offending field.
#[test]
fn odoh_bootstrap_ip_rejects_garbage() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
relay_ip = "not-an-ip"
"#;
let err = toml::from_str::<Config>(toml).err().unwrap().to_string();
assert!(err.contains("relay_ip"), "got: {err}");
}
// An explicit :8443 on the relay URL must carry through to the bootstrap
// socket address instead of the https default 443.
#[test]
fn odoh_bootstrap_uses_url_port_when_non_default() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs:8443/relay"
target = "https://odoh.cloudflare-dns.com/dns-query"
relay_ip = "178.104.229.30"
"#;
let config: Config = toml::from_str(toml).unwrap();
let odoh = config.upstream.odoh_upstream().unwrap();
assert_eq!(
odoh.relay_bootstrap.unwrap().to_string(),
"178.104.229.30:8443"
);
}
#[test]
fn hedge_delay_zeroed_for_odoh_mode() {
    // ODoH mode ignores the configured hedge_ms entirely; other modes
    // keep honouring it.
    let odoh_delay = UpstreamMode::Odoh.hedge_delay(50);
    let forward_delay = UpstreamMode::Forward.hedge_delay(50);
    assert_eq!(
        odoh_delay,
        Duration::ZERO,
        "ODoH mode must zero hedge regardless of configured hedge_ms"
    );
    assert_eq!(
        forward_delay,
        Duration::from_millis(50),
        "non-ODoH modes honour configured hedge_ms"
    );
}
// Omitting upstream.target under mode = "odoh" is an error naming the field.
#[test]
fn odoh_missing_target_rejected() {
let toml = r#"
[upstream]
mode = "odoh"
relay = "https://odoh-relay.numa.rs/relay"
"#;
let config: Config = toml::from_str(toml).unwrap();
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
assert!(err.contains("upstream.target"), "got: {err}");
}
// ── issue #82: [[forwarding]] config section ────────────────────────
#[test]
@@ -966,7 +643,7 @@ relay = "https://odoh-relay.numa.rs/relay"
let config: Config = toml::from_str(toml).unwrap();
assert_eq!(config.forwarding.len(), 1);
assert_eq!(config.forwarding[0].suffix, &["home.local"]);
assert_eq!(config.forwarding[0].upstream, vec!["100.90.1.63:5361"]);
assert_eq!(config.forwarding[0].upstream, "100.90.1.63:5361");
}
#[test]
@@ -994,7 +671,7 @@ relay = "https://odoh-relay.numa.rs/relay"
"#;
let config: Config = toml::from_str(toml).unwrap();
assert_eq!(config.forwarding.len(), 2);
assert_eq!(config.forwarding[1].upstream, vec!["10.0.0.1"]);
assert_eq!(config.forwarding[1].upstream, "10.0.0.1");
}
#[test]
@@ -1016,29 +693,28 @@ relay = "https://odoh-relay.numa.rs/relay"
fn forwarding_suffix_array_expands_to_multiple_runtime_rules() {
let rule = ForwardingRuleConfig {
suffix: vec!["168.192.in-addr.arpa".to_string(), "onsite".to_string()],
upstream: vec!["192.168.88.1".to_string()],
upstream: "192.168.88.1".to_string(),
};
let runtime = rule.to_runtime_rules().unwrap();
assert_eq!(runtime.len(), 2);
assert_eq!(runtime[0].suffix, "168.192.in-addr.arpa");
assert_eq!(runtime[1].suffix, "onsite");
assert_eq!(
runtime[0].upstream.preferred(),
runtime[1].upstream.preferred()
);
assert_eq!(runtime[0].upstream, runtime[1].upstream);
}
#[test]
fn forwarding_upstream_with_explicit_port() {
let rule = ForwardingRuleConfig {
suffix: vec!["home.local".to_string()],
upstream: vec!["100.90.1.63:5361".to_string()],
upstream: "100.90.1.63:5361".to_string(),
};
let runtime = rule.to_runtime_rules().unwrap();
assert_eq!(runtime.len(), 1);
let preferred = runtime[0].upstream.preferred().unwrap();
assert!(matches!(preferred, crate::forward::Upstream::Udp(_)));
assert_eq!(preferred.to_string(), "100.90.1.63:5361");
assert!(matches!(
runtime[0].upstream,
crate::forward::Upstream::Udp(_)
));
assert_eq!(runtime[0].upstream.to_string(), "100.90.1.63:5361");
assert_eq!(runtime[0].suffix, "home.local");
}
@@ -1046,20 +722,17 @@ relay = "https://odoh-relay.numa.rs/relay"
fn forwarding_upstream_defaults_to_port_53() {
let rule = ForwardingRuleConfig {
suffix: vec!["home.local".to_string()],
upstream: vec!["100.90.1.63".to_string()],
upstream: "100.90.1.63".to_string(),
};
let runtime = rule.to_runtime_rules().unwrap();
assert_eq!(
runtime[0].upstream.preferred().unwrap().to_string(),
"100.90.1.63:53"
);
assert_eq!(runtime[0].upstream.to_string(), "100.90.1.63:53");
}
#[test]
fn forwarding_invalid_upstream_returns_error() {
let rule = ForwardingRuleConfig {
suffix: vec!["home.local".to_string()],
upstream: vec!["not-a-valid-host".to_string()],
upstream: "not-a-valid-host".to_string(),
};
assert!(rule.to_runtime_rules().is_err());
}
@@ -1068,14 +741,14 @@ relay = "https://odoh-relay.numa.rs/relay"
fn forwarding_upstream_accepts_dot_scheme() {
let rule = ForwardingRuleConfig {
suffix: vec!["google.com".to_string()],
upstream: vec!["tls://9.9.9.9#dns.quad9.net".to_string()],
upstream: "tls://9.9.9.9#dns.quad9.net".to_string(),
};
let runtime = rule
.to_runtime_rules()
.expect("tls:// upstream should parse");
assert_eq!(runtime.len(), 1);
assert_eq!(
runtime[0].upstream.preferred().unwrap().to_string(),
runtime[0].upstream.to_string(),
"tls://9.9.9.9:853#dns.quad9.net"
);
}
@@ -1084,14 +757,14 @@ relay = "https://odoh-relay.numa.rs/relay"
fn forwarding_upstream_accepts_doh_scheme() {
let rule = ForwardingRuleConfig {
suffix: vec!["goog".to_string()],
upstream: vec!["https://dns.quad9.net/dns-query".to_string()],
upstream: "https://dns.quad9.net/dns-query".to_string(),
};
let runtime = rule
.to_runtime_rules()
.expect("https:// upstream should parse");
assert_eq!(runtime.len(), 1);
assert_eq!(
runtime[0].upstream.preferred().unwrap().to_string(),
runtime[0].upstream.to_string(),
"https://dns.quad9.net/dns-query"
);
}
@@ -1100,90 +773,44 @@ relay = "https://odoh-relay.numa.rs/relay"
fn forwarding_config_rules_take_precedence_over_discovered() {
let config_rules = vec![ForwardingRuleConfig {
suffix: vec!["home.local".to_string()],
upstream: vec!["10.0.0.1:53".to_string()],
upstream: "10.0.0.1:53".to_string(),
}];
let discovered = vec![crate::system_dns::ForwardingRule::new(
"home.local".to_string(),
crate::forward::UpstreamPool::new(
vec![crate::forward::Upstream::Udp(
"192.168.1.1:53".parse().unwrap(),
)],
vec![],
),
crate::forward::Upstream::Udp("192.168.1.1:53".parse().unwrap()),
)];
let merged = merge_forwarding_rules(&config_rules, discovered).unwrap();
let picked = crate::system_dns::match_forwarding_rule("host.home.local", &merged)
.expect("rule should match");
assert_eq!(picked.preferred().unwrap().to_string(), "10.0.0.1:53");
assert_eq!(picked.to_string(), "10.0.0.1:53");
}
#[test]
fn forwarding_merge_preserves_non_overlapping_discovered() {
let config_rules = vec![ForwardingRuleConfig {
suffix: vec!["home.local".to_string()],
upstream: vec!["10.0.0.1:53".to_string()],
upstream: "10.0.0.1:53".to_string(),
}];
let discovered = vec![crate::system_dns::ForwardingRule::new(
"corp.example".to_string(),
crate::forward::UpstreamPool::new(
vec![crate::forward::Upstream::Udp(
"192.168.1.1:53".parse().unwrap(),
)],
vec![],
),
crate::forward::Upstream::Udp("192.168.1.1:53".parse().unwrap()),
)];
let merged = merge_forwarding_rules(&config_rules, discovered).unwrap();
assert_eq!(merged.len(), 2);
let picked = crate::system_dns::match_forwarding_rule("host.corp.example", &merged)
.expect("discovered rule should still match");
assert_eq!(picked.preferred().unwrap().to_string(), "192.168.1.1:53");
assert_eq!(picked.to_string(), "192.168.1.1:53");
}
#[test]
fn forwarding_merge_suffix_array_expands_to_multiple_rules() {
let config_rules = vec![ForwardingRuleConfig {
suffix: vec!["a.local".to_string(), "b.local".to_string()],
upstream: vec!["10.0.0.1:53".to_string()],
upstream: "10.0.0.1:53".to_string(),
}];
let merged = merge_forwarding_rules(&config_rules, vec![]).unwrap();
assert_eq!(merged.len(), 2);
}
#[test]
fn forwarding_parses_upstream_array() {
    // An `upstream` array under [[forwarding]] must keep every entry.
    let raw = r#"
[[forwarding]]
suffix = "google.com"
upstream = ["tls://9.9.9.9#dns.quad9.net", "tls://149.112.112.112#dns.quad9.net"]
"#;
    let parsed: Config = toml::from_str(raw).unwrap();
    assert_eq!(parsed.forwarding.len(), 1);
    assert_eq!(parsed.forwarding[0].upstream.len(), 2);
}
// Two upstream entries become one runtime rule whose pool label advertises
// the extra primaries ("+1 more").
#[test]
fn forwarding_upstream_array_builds_pool_with_multiple_primaries() {
let rule = ForwardingRuleConfig {
suffix: vec!["google.com".to_string()],
upstream: vec![
"tls://9.9.9.9#dns.quad9.net".to_string(),
"tls://149.112.112.112#dns.quad9.net".to_string(),
],
};
let runtime = rule.to_runtime_rules().unwrap();
assert_eq!(runtime.len(), 1);
let label = runtime[0].upstream.label();
assert!(label.contains("+1 more"), "label was: {}", label);
}
#[test]
fn forwarding_empty_upstream_array_errors() {
    // A rule with no upstream entries cannot forward anywhere: reject it.
    let empty_rule = ForwardingRuleConfig {
        suffix: vec!["home.local".to_string()],
        upstream: Vec::new(),
    };
    assert!(empty_rule.to_runtime_rules().is_err());
}
}
pub struct ConfigLoad {

View File

@@ -16,9 +16,7 @@ use crate::blocklist::BlocklistStore;
use crate::buffer::BytePacketBuffer;
use crate::cache::{DnsCache, DnssecStatus};
use crate::config::{UpstreamMode, ZoneMap};
#[cfg(test)]
use crate::forward::Upstream;
use crate::forward::{forward_with_failover_raw, UpstreamPool};
use crate::forward::{forward_query_raw, forward_with_failover_raw, Upstream, UpstreamPool};
use crate::header::ResultCode;
use crate::health::HealthMeta;
use crate::lan::PeerStore;
@@ -77,10 +75,6 @@ pub struct ServerCtx {
pub ca_pem: Option<String>,
pub mobile_enabled: bool,
pub mobile_port: u16,
/// When true, AAAA queries short-circuit with NODATA (NOERROR + empty
/// answer) instead of hitting cache/forwarding/upstream. Local data
/// (overrides, zones, .numa proxy, blocklist sinkhole) is unaffected.
pub filter_aaaa: bool,
}
/// Transport-agnostic DNS resolution. Runs the full pipeline (overrides, blocklist,
@@ -105,7 +99,6 @@ pub async fn resolve_query(
// Pipeline: overrides -> .localhost -> local zones -> special-use (unless forwarded)
// -> .tld proxy -> blocklist -> cache -> forwarding -> recursive/upstream
// Each lock is scoped to avoid holding MutexGuard across await points.
let mut upstream_transport: Option<crate::stats::UpstreamTransport> = None;
let (response, path, dnssec) = {
let override_record = ctx.overrides.read().unwrap().lookup(&qname);
if let Some(record) = override_record {
@@ -177,13 +170,6 @@ pub async fn resolve_query(
60,
));
(resp, QueryPath::Blocked, DnssecStatus::Indeterminate)
} else if qtype == QueryType::AAAA && ctx.filter_aaaa {
// RFC 2308 NODATA: NOERROR with empty answer section. Prevents
// Happy Eyeballs clients from waiting on an AAAA they'll never use
// on IPv4-only networks. NXDOMAIN would be wrong (it'd imply the
// name doesn't exist for A either).
let resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
} else {
let cached = ctx.cache.read().unwrap().lookup_with_status(&qname, qtype);
if let Some((cached, cached_dnssec, freshness)) = cached {
@@ -204,32 +190,13 @@ pub async fn resolve_query(
resp.header.authed_data = true;
}
(resp, QueryPath::Cached, cached_dnssec)
} else if let Some(pool) =
} else if let Some(upstream) =
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
{
// Conditional forwarding takes priority over recursive mode
// (e.g. Tailscale .ts.net, VPC private zones)
upstream_transport = pool.preferred().map(|u| u.transport());
match forward_with_failover_raw(
raw_wire,
pool,
&ctx.srtt,
ctx.timeout,
ctx.hedge_delay,
)
.await
{
Ok(resp_wire) => match cache_and_parse(ctx, &qname, qtype, &resp_wire) {
Ok(resp) => (resp, QueryPath::Forwarded, DnssecStatus::Indeterminate),
Err(e) => {
error!("{} | {:?} {} | PARSE ERROR | {}", src_addr, qtype, qname, e);
(
DnsPacket::response_from(&query, ResultCode::SERVFAIL),
QueryPath::UpstreamError,
DnssecStatus::Indeterminate,
)
}
},
match forward_and_cache(raw_wire, upstream, ctx, &qname, qtype).await {
Ok(resp) => (resp, QueryPath::Forwarded, DnssecStatus::Indeterminate),
Err(e) => {
error!(
"{} | {:?} {} | FORWARD ERROR | {}",
@@ -243,9 +210,6 @@ pub async fn resolve_query(
}
}
} else if ctx.upstream_mode == UpstreamMode::Recursive {
// Recursive resolution makes UDP hops to roots/TLDs/auths;
// tag as Udp so the dashboard can aggregate plaintext-wire
// egress honestly. Only mark on success — errors stay None.
let key = (qname.clone(), qtype);
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
crate::recursive::resolve_recursive(
@@ -268,8 +232,6 @@ pub async fn resolve_query(
qname,
err.as_deref().unwrap_or("leader failed")
);
} else {
upstream_transport = Some(crate::stats::UpstreamTransport::Udp);
}
(resp, path, DnssecStatus::Indeterminate)
} else {
@@ -284,10 +246,7 @@ pub async fn resolve_query(
.await
{
Ok(resp_wire) => match cache_and_parse(ctx, &qname, qtype, &resp_wire) {
Ok(resp) => {
upstream_transport = pool.preferred().map(|u| u.transport());
(resp, QueryPath::Upstream, DnssecStatus::Indeterminate)
}
Ok(resp) => (resp, QueryPath::Upstream, DnssecStatus::Indeterminate),
Err(e) => {
error!("{} | {:?} {} | PARSE ERROR | {}", src_addr, qtype, qname, e);
(
@@ -355,15 +314,6 @@ pub async fn resolve_query(
strip_dnssec_records(&mut response);
}
// filter_aaaa: also strip ipv6hint from HTTPS/SVCB answers so modern
// browsers (Chrome ≥103 etc.) don't receive v6 address hints via the
// HTTPS record path that bypasses AAAA entirely. Gated on !client_do
// because modifying rdata invalidates any accompanying RRSIG — a DO-bit
// validator downstream would reject the response as Bogus.
if ctx.filter_aaaa && !client_do {
strip_svcb_ipv6_hints(&mut response);
}
// Echo EDNS back if client sent it
if query.edns.is_some() {
response.edns = Some(crate::packet::EdnsOpt {
@@ -407,7 +357,7 @@ pub async fn resolve_query(
// Record stats and query log
{
let mut s = ctx.stats.lock().unwrap();
let total = s.record(path, transport, upstream_transport);
let total = s.record(path, transport);
if total.is_multiple_of(1000) {
s.log_summary();
}
@@ -483,6 +433,17 @@ pub async fn refresh_entry(ctx: &ServerCtx, qname: &str, qtype: QueryType) {
}
}
/// Forward the raw `wire` query to a single `upstream` using the context's
/// timeout, then hand the reply bytes to `cache_and_parse` for parsing
/// (and, per that helper's name, cache insertion) on behalf of `qname`/`qtype`.
async fn forward_and_cache(
wire: &[u8],
upstream: &Upstream,
ctx: &ServerCtx,
qname: &str,
qtype: QueryType,
) -> crate::Result<DnsPacket> {
let resp_wire = forward_query_raw(wire, upstream, ctx.timeout).await?;
cache_and_parse(ctx, qname, qtype, &resp_wire)
}
pub async fn handle_query(
mut buffer: BytePacketBuffer,
raw_len: usize,
@@ -521,20 +482,6 @@ fn strip_dnssec_records(pkt: &mut DnsPacket) {
pkt.resources.retain(|r| !is_dnssec_record(r));
}
fn strip_svcb_ipv6_hints(pkt: &mut DnsPacket) {
let https_qtype = QueryType::HTTPS.to_num();
let svcb_qtype = QueryType::SVCB.to_num();
pkt.for_each_record_mut(|rec| {
if let DnsRecord::UNKNOWN { qtype, data, .. } = rec {
if *qtype == https_qtype || *qtype == svcb_qtype {
if let Some(new_data) = crate::svcb::strip_ipv6hint(data) {
*data = new_data;
}
}
}
});
}
fn is_special_use_domain(qname: &str) -> bool {
if qname.ends_with(".in-addr.arpa") {
// RFC 6303: private + loopback + link-local reverse DNS
@@ -1135,7 +1082,7 @@ mod tests {
let mut ctx = crate::testutil::test_ctx().await;
ctx.forwarding_rules = vec![ForwardingRule::new(
"168.192.in-addr.arpa".to_string(),
UpstreamPool::new(vec![Upstream::Udp(upstream_addr)], vec![]),
Upstream::Udp(upstream_addr),
)];
let ctx = Arc::new(ctx);
@@ -1231,201 +1178,6 @@ mod tests {
}
}
#[tokio::test]
async fn pipeline_filter_aaaa_returns_nodata() {
let mut ctx = crate::testutil::test_ctx().await;
ctx.filter_aaaa = true;
let ctx = Arc::new(ctx);
let (resp, path) = resolve_in_test(&ctx, "example.com", QueryType::AAAA).await;
assert_eq!(path, QueryPath::Local);
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
assert!(resp.answers.is_empty(), "AAAA must be filtered to NODATA");
}
#[tokio::test]
async fn pipeline_filter_aaaa_leaves_a_queries_alone() {
let mut upstream_resp = DnsPacket::new();
upstream_resp.header.response = true;
upstream_resp.header.rescode = ResultCode::NOERROR;
upstream_resp.answers.push(DnsRecord::A {
domain: "example.com".to_string(),
addr: Ipv4Addr::new(93, 184, 216, 34),
ttl: 300,
});
let upstream_addr = crate::testutil::mock_upstream(upstream_resp).await;
let mut ctx = crate::testutil::test_ctx().await;
ctx.filter_aaaa = true;
ctx.upstream_pool
.lock()
.unwrap()
.set_primary(vec![Upstream::Udp(upstream_addr)]);
let ctx = Arc::new(ctx);
let (resp, path) = resolve_in_test(&ctx, "example.com", QueryType::A).await;
assert_eq!(path, QueryPath::Upstream);
assert_eq!(resp.answers.len(), 1);
}
#[tokio::test]
async fn pipeline_filter_aaaa_respects_override() {
let mut ctx = crate::testutil::test_ctx().await;
ctx.filter_aaaa = true;
ctx.overrides
.write()
.unwrap()
.insert("v6.test", "2001:db8::1", 60, None)
.unwrap();
let ctx = Arc::new(ctx);
let (resp, path) = resolve_in_test(&ctx, "v6.test", QueryType::AAAA).await;
assert_eq!(path, QueryPath::Overridden);
assert_eq!(resp.answers.len(), 1, "override must win over filter");
}
#[tokio::test]
async fn pipeline_filter_aaaa_strips_ipv6hint_from_https_and_svcb() {
let rdata = crate::svcb::build_rdata(
1,
&[],
&[
(1, vec![0x02, b'h', b'3']),
(
6,
vec![
0x26, 0x06, 0x47, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01,
],
),
],
);
let mut pkt = DnsPacket::new();
pkt.header.response = true;
pkt.header.rescode = ResultCode::NOERROR;
pkt.questions.push(crate::question::DnsQuestion {
name: "hints.test".to_string(),
qtype: QueryType::HTTPS,
});
pkt.answers.push(DnsRecord::UNKNOWN {
domain: "hints.test".to_string(),
qtype: 65,
data: rdata.clone(),
ttl: 300,
});
let mut svcb_pkt = pkt.clone();
svcb_pkt.questions[0].name = "svc.test".to_string();
svcb_pkt.questions[0].qtype = QueryType::SVCB;
if let DnsRecord::UNKNOWN { domain, qtype, .. } = &mut svcb_pkt.answers[0] {
*domain = "svc.test".to_string();
*qtype = 64;
}
let mut ctx = crate::testutil::test_ctx().await;
ctx.filter_aaaa = true;
ctx.cache
.write()
.unwrap()
.insert("hints.test", QueryType::HTTPS, &pkt);
ctx.cache
.write()
.unwrap()
.insert("svc.test", QueryType::SVCB, &svcb_pkt);
let ctx = Arc::new(ctx);
for (name, qtype, label) in [
("hints.test", QueryType::HTTPS, "HTTPS"),
("svc.test", QueryType::SVCB, "SVCB"),
] {
let (resp, path) = resolve_in_test(&ctx, name, qtype).await;
assert_eq!(path, QueryPath::Cached, "{label}");
assert_eq!(resp.answers.len(), 1, "{label}");
match &resp.answers[0] {
DnsRecord::UNKNOWN { data, .. } => {
assert!(
data.len() < rdata.len(),
"{label}: ipv6hint (20 bytes) must be removed"
);
// Bytes for key=6 must not appear at any 4-byte boundary in the
// params section — cheap structural check.
assert!(
!data.windows(4).any(|w| w == [0, 6, 0, 16]),
"{label}: ipv6hint TLV header must be absent"
);
}
other => panic!("{label}: expected UNKNOWN record, got {other:?}"),
}
}
}
#[tokio::test]
async fn pipeline_filter_aaaa_preserves_ipv6hint_for_dnssec_clients() {
// Regression guard for the DO-bit gate in resolve_query: modifying
// HTTPS rdata invalidates any accompanying RRSIG, so a DO=1 client
// must receive the record untouched even when filter_aaaa is on.
let rdata = crate::svcb::build_rdata(
1,
&[],
&[(
6,
vec![
0x26, 0x06, 0x47, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01,
],
)],
);
let mut pkt = DnsPacket::new();
pkt.header.response = true;
pkt.header.rescode = ResultCode::NOERROR;
pkt.questions.push(crate::question::DnsQuestion {
name: "hints.test".to_string(),
qtype: QueryType::HTTPS,
});
pkt.answers.push(DnsRecord::UNKNOWN {
domain: "hints.test".to_string(),
qtype: 65,
data: rdata.clone(),
ttl: 300,
});
let mut ctx = crate::testutil::test_ctx().await;
ctx.filter_aaaa = true;
ctx.cache
.write()
.unwrap()
.insert("hints.test", QueryType::HTTPS, &pkt);
let ctx = Arc::new(ctx);
// Build a query with EDNS DO bit set — can't use resolve_in_test
// because it constructs a plain query without EDNS.
let mut query = DnsPacket::query(0xBEEF, "hints.test", QueryType::HTTPS);
query.edns = Some(crate::packet::EdnsOpt {
do_bit: true,
..Default::default()
});
let mut buf = BytePacketBuffer::new();
query.write(&mut buf).unwrap();
let raw = &buf.buf[..buf.pos];
let src: SocketAddr = "127.0.0.1:1234".parse().unwrap();
let (resp_buf, _) = resolve_query(query, raw, src, &ctx, Transport::Udp)
.await
.unwrap();
let mut resp_parse_buf = BytePacketBuffer::from_bytes(resp_buf.filled());
let resp = DnsPacket::from_buffer(&mut resp_parse_buf).unwrap();
match &resp.answers[0] {
DnsRecord::UNKNOWN { data, .. } => {
assert_eq!(
data, &rdata,
"ipv6hint must be preserved for DO-bit clients"
);
}
other => panic!("expected UNKNOWN record, got {:?}", other),
}
}
#[tokio::test]
async fn pipeline_blocklist_sinkhole() {
let ctx = crate::testutil::test_ctx().await;
@@ -1485,7 +1237,7 @@ mod tests {
let mut ctx = crate::testutil::test_ctx().await;
ctx.forwarding_rules = vec![ForwardingRule::new(
"corp".to_string(),
UpstreamPool::new(vec![Upstream::Udp(upstream_addr)], vec![]),
Upstream::Udp(upstream_addr),
)];
let ctx = Arc::new(ctx);
@@ -1502,37 +1254,6 @@ mod tests {
}
}
#[tokio::test]
async fn pipeline_forwarding_fails_over_to_second_upstream() {
let dead = crate::testutil::blackhole_upstream();
let mut live_resp = DnsPacket::new();
live_resp.header.response = true;
live_resp.header.rescode = ResultCode::NOERROR;
live_resp.answers.push(DnsRecord::A {
domain: "internal.corp".to_string(),
addr: Ipv4Addr::new(10, 9, 9, 9),
ttl: 600,
});
let live = crate::testutil::mock_upstream(live_resp).await;
let mut ctx = crate::testutil::test_ctx().await;
ctx.forwarding_rules = vec![ForwardingRule::new(
"corp".to_string(),
UpstreamPool::new(vec![Upstream::Udp(dead), Upstream::Udp(live)], vec![]),
)];
let ctx = Arc::new(ctx);
let (resp, path) = resolve_in_test(&ctx, "internal.corp", QueryType::A).await;
assert_eq!(path, QueryPath::Forwarded);
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
assert_eq!(resp.answers.len(), 1);
match &resp.answers[0] {
DnsRecord::A { addr, .. } => assert_eq!(*addr, Ipv4Addr::new(10, 9, 9, 9)),
other => panic!("expected A record, got {:?}", other),
}
}
#[tokio::test]
async fn pipeline_default_pool_reports_upstream_path() {
let mut upstream_resp = DnsPacket::new();

View File

@@ -1,16 +1,14 @@
use std::fmt;
use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, RwLock};
use std::sync::RwLock;
use std::time::{Duration, Instant};
use tokio::net::UdpSocket;
use tokio::time::timeout;
use crate::buffer::BytePacketBuffer;
use crate::odoh::{query_through_relay, OdohConfigCache};
use crate::packet::DnsPacket;
use crate::srtt::SrttCache;
use crate::stats::UpstreamTransport;
use crate::Result;
#[derive(Clone)]
@@ -25,36 +23,6 @@ pub enum Upstream {
tls_name: Option<String>,
connector: tokio_rustls::TlsConnector,
},
/// Oblivious DNS-over-HTTPS (RFC 9230). Queries are HPKE-sealed to the
/// target and forwarded through an independent relay. Target host lives
/// on `target_config` (single source of truth — the cache keys on it).
Odoh {
relay_url: String,
target_path: String,
client: reqwest::Client,
target_config: Arc<OdohConfigCache>,
},
}
impl Upstream {
/// IP address to key SRTT tracking on, if the upstream has a stable one.
/// `Doh` and `Odoh` route through a URL + connection pool, so there's no
/// single IP to track; SRTT is skipped for them.
pub fn tracked_ip(&self) -> Option<IpAddr> {
match self {
Upstream::Udp(addr) | Upstream::Dot { addr, .. } => Some(addr.ip()),
Upstream::Doh { .. } | Upstream::Odoh { .. } => None,
}
}
pub fn transport(&self) -> UpstreamTransport {
match self {
Upstream::Udp(_) => UpstreamTransport::Udp,
Upstream::Doh { .. } => UpstreamTransport::Doh,
Upstream::Dot { .. } => UpstreamTransport::Dot,
Upstream::Odoh { .. } => UpstreamTransport::Odoh,
}
}
}
impl PartialEq for Upstream {
@@ -63,20 +31,6 @@ impl PartialEq for Upstream {
(Self::Udp(a), Self::Udp(b)) => a == b,
(Self::Doh { url: a, .. }, Self::Doh { url: b, .. }) => a == b,
(Self::Dot { addr: a, .. }, Self::Dot { addr: b, .. }) => a == b,
(
Self::Odoh {
relay_url: ra,
target_path: pa,
target_config: ca,
..
},
Self::Odoh {
relay_url: rb,
target_path: pb,
target_config: cb,
..
},
) => ra == rb && pa == pb && ca.target_host() == cb.target_host(),
_ => false,
}
}
@@ -97,18 +51,6 @@ impl fmt::Display for Upstream {
Some(name) => write!(f, "tls://{}#{}", addr, name),
None => write!(f, "tls://{}", addr),
},
Upstream::Odoh {
relay_url,
target_path,
target_config,
..
} => write!(
f,
"odoh://{}{} via {}",
target_config.target_host(),
target_path,
relay_url
),
}
}
}
@@ -128,20 +70,22 @@ pub(crate) fn parse_upstream_addr(
Err(format!("invalid upstream address: {}", s))
}
/// Parse a slice of upstream address strings into `Upstream` values, failing
/// on the first invalid entry.
pub fn parse_upstream_list(addrs: &[String], default_port: u16) -> Result<Vec<Upstream>> {
addrs
.iter()
.map(|s| parse_upstream(s, default_port))
.collect()
}
pub fn parse_upstream(s: &str, default_port: u16) -> Result<Upstream> {
if s.starts_with("https://") {
let client = reqwest::Client::builder()
.use_rustls_tls()
.http2_initial_stream_window_size(65_535)
.http2_initial_connection_window_size(65_535)
.http2_keep_alive_interval(Duration::from_secs(15))
.http2_keep_alive_while_idle(true)
.http2_keep_alive_timeout(Duration::from_secs(10))
.pool_idle_timeout(Duration::from_secs(300))
.pool_max_idle_per_host(1)
.build()
.unwrap_or_default();
return Ok(Upstream::Doh {
url: s.to_string(),
client: build_https_client(),
client,
});
}
// tls://IP:PORT#hostname or tls://IP#hostname (default port 853)
@@ -162,50 +106,6 @@ pub fn parse_upstream(s: &str, default_port: u16) -> Result<Upstream> {
Ok(Upstream::Udp(addr))
}
/// HTTP/2 client tuned for DoH/ODoH: small windows for low latency, long-lived
/// keep-alive. Shared by the DoH upstream and the ODoH config-fetcher +
/// seal/open path. Pool defaults to one idle conn per host — good for
/// resolvers that talk to a single upstream; relays that fan out to many
/// targets should use [`build_https_client_with_pool`].
pub fn build_https_client() -> reqwest::Client {
// One idle connection per host: the single-upstream default.
build_https_client_with_pool(1)
}
/// Same shape as [`build_https_client`], but caller picks
/// `pool_max_idle_per_host`. Relay workloads hit many distinct target hosts
/// and benefit from a larger pool so warm connections survive concurrent
/// fan-out.
pub fn build_https_client_with_pool(pool_max_idle_per_host: usize) -> reqwest::Client {
// `unwrap_or_default()` keeps this infallible: if the tuned builder ever
// fails, fall back to a plain default reqwest client.
https_client_builder(pool_max_idle_per_host)
.build()
.unwrap_or_default()
}
/// Build the HTTPS client used by the ODoH upstream. Any configured
/// bootstrap IPs are pinned via `resolve()` so relay/target hostname
/// lookups bypass system DNS.
pub fn build_odoh_client(odoh: &crate::config::OdohUpstream) -> reqwest::Client {
    let pins = [
        (&odoh.relay_host, odoh.relay_bootstrap),
        (&odoh.target_host, odoh.target_bootstrap),
    ];
    let mut client_builder = https_client_builder(1);
    for (host, bootstrap) in pins {
        if let Some(sock) = bootstrap {
            client_builder = client_builder.resolve(host, sock);
        }
    }
    client_builder.build().unwrap_or_default()
}
/// Shared reqwest builder for every HTTPS (DoH/ODoH) client: rustls TLS,
/// small HTTP/2 flow-control windows, keep-alive pings even while idle, and
/// a caller-chosen idle-connection pool size.
fn https_client_builder(pool_max_idle_per_host: usize) -> reqwest::ClientBuilder {
reqwest::Client::builder()
.use_rustls_tls()
// 64 KiB stream/connection windows — "small windows for low latency"
// per the build_https_client doc above; DNS payloads are tiny.
.http2_initial_stream_window_size(65_535)
.http2_initial_connection_window_size(65_535)
// Ping every 15s (even idle) and give pings 10s to come back, so dead
// connections are noticed before a query rides on them.
.http2_keep_alive_interval(Duration::from_secs(15))
.http2_keep_alive_while_idle(true)
.http2_keep_alive_timeout(Duration::from_secs(10))
// Keep warm connections around for 5 minutes.
.pool_idle_timeout(Duration::from_secs(300))
.pool_max_idle_per_host(pool_max_idle_per_host)
}
fn build_dot_connector() -> Result<tokio_rustls::TlsConnector> {
let _ = rustls::crypto::ring::default_provider().install_default();
let mut root_store = rustls::RootCertStore::empty();
@@ -370,22 +270,6 @@ pub async fn forward_query_raw(
tls_name,
connector,
} => forward_dot_raw(wire, *addr, tls_name, connector, timeout_duration).await,
Upstream::Odoh {
relay_url,
target_path,
client,
target_config,
} => {
query_through_relay(
wire,
relay_url,
target_path,
client,
target_config,
timeout_duration,
)
.await
}
}
}
@@ -461,17 +345,18 @@ pub async fn forward_with_failover_raw(
timeout_duration: Duration,
hedge_delay: Duration,
) -> Result<Vec<u8>> {
let mut candidates: Vec<(usize, u64)> = {
let srtt_read = srtt.read().unwrap();
pool.primary
.iter()
.enumerate()
.map(|(i, u)| {
let rtt = u.tracked_ip().map(|ip| srtt_read.get(ip)).unwrap_or(0);
(i, rtt)
})
.collect()
};
let mut candidates: Vec<(usize, u64)> = pool
.primary
.iter()
.enumerate()
.map(|(i, u)| {
let rtt = match u {
Upstream::Udp(addr) => srtt.read().unwrap().get(addr.ip()),
_ => 0,
};
(i, rtt)
})
.collect();
candidates.sort_by_key(|&(_, rtt)| rtt);
let all_upstreams: Vec<&Upstream> = candidates
@@ -495,15 +380,15 @@ pub async fn forward_with_failover_raw(
};
match result {
Ok(resp) => {
if let Some(ip) = upstream.tracked_ip() {
if let Upstream::Udp(addr) = upstream {
let rtt_ms = start.elapsed().as_millis() as u64;
srtt.write().unwrap().record_rtt(ip, rtt_ms, false);
srtt.write().unwrap().record_rtt(addr.ip(), rtt_ms, false);
}
return Ok(resp);
}
Err(e) => {
if let Some(ip) = upstream.tracked_ip() {
srtt.write().unwrap().record_failure(ip);
if let Upstream::Udp(addr) = upstream {
srtt.write().unwrap().record_failure(addr.ip());
}
log::debug!("upstream {} failed: {}", upstream, e);
last_err = Some(e);
@@ -822,62 +707,4 @@ mod tests {
assert!(!pool.maybe_update_primary("not-an-ip", 53));
assert_eq!(pool.preferred().unwrap().to_string(), "1.2.3.4:53");
}
fn tcp_closed_port() -> SocketAddr {
    // Bind an ephemeral listener just long enough to learn its address, then
    // drop it: connecting to the now-closed port gets an immediate RST from
    // the kernel instead of hanging until a timeout.
    let probe = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
    let closed_addr = probe.local_addr().unwrap();
    drop(probe);
    closed_addr
}
#[tokio::test]
async fn udp_failure_records_in_srtt() {
    // A blackhole upstream (see crate::testutil) presumably never answers,
    // so the short timeout below forces a failure; that failure must end up
    // recorded against the upstream's IP in the SRTT cache.
    let blackhole = crate::testutil::blackhole_upstream();
    let pool = UpstreamPool::new(vec![Upstream::Udp(blackhole)], vec![]);
    let srtt = RwLock::new(SrttCache::new(true));
    // 12 zero bytes = a bare DNS header; the payload content is irrelevant
    // because nothing will ever answer.
    let _ = forward_with_failover_raw(
        &[0u8; 12],
        &pool,
        &srtt,
        Duration::from_millis(100),
        Duration::ZERO,
    )
    .await;
    // The failed upstream's IP must now be known to the SRTT cache.
    assert!(srtt.read().unwrap().is_known(blackhole.ip()));
}
#[tokio::test]
async fn dot_failure_records_in_srtt() {
    // Two ports that were just closed: DoT connects are refused immediately
    // (RST, see tcp_closed_port), so both upstreams fail fast and failover
    // walks from the first to the second.
    let dead1 = tcp_closed_port();
    let dead2 = tcp_closed_port();
    let connector = build_dot_connector().unwrap();
    let pool = UpstreamPool::new(
        vec![
            Upstream::Dot {
                addr: dead1,
                tls_name: Some("dns.quad9.net".to_string()),
                connector: connector.clone(),
            },
            Upstream::Dot {
                addr: dead2,
                tls_name: Some("dns.quad9.net".to_string()),
                connector,
            },
        ],
        vec![],
    );
    let srtt = RwLock::new(SrttCache::new(true));
    let _ = forward_with_failover_raw(
        &[0u8; 12],
        &pool,
        &srtt,
        Duration::from_millis(500),
        Duration::ZERO,
    )
    .await;
    // The attempted addresses must have their failures recorded.
    let cache = srtt.read().unwrap();
    assert!(cache.is_known(dead1.ip()));
    assert!(cache.is_known(dead2.ip()));
}
}

View File

@@ -13,7 +13,6 @@ pub mod health;
pub mod lan;
pub mod mobile_api;
pub mod mobileconfig;
pub mod odoh;
pub mod override_store;
pub mod packet;
pub mod proxy;
@@ -21,13 +20,11 @@ pub mod query_log;
pub mod question;
pub mod record;
pub mod recursive;
pub mod relay;
pub mod serve;
pub mod service_store;
pub mod setup_phone;
pub mod srtt;
pub mod stats;
pub mod svcb;
pub mod system_dns;
pub mod tls;
pub mod wire;

View File

@@ -31,16 +31,16 @@ fn main() -> numa::Result<()> {
match arg1.as_str() {
"install" => {
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — installing\n");
eprintln!("\x1b[1;38;5;166mNuma\x1b[0m — installing\n");
return install_service().map_err(|e| e.into());
}
"uninstall" => {
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — uninstalling\n");
eprintln!("\x1b[1;38;5;166mNuma\x1b[0m — uninstalling\n");
return uninstall_service().map_err(|e| e.into());
}
"service" => {
let sub = std::env::args().nth(2).unwrap_or_default();
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — service management\n");
eprintln!("\x1b[1;38;5;166mNuma\x1b[0m — service management\n");
return match sub.as_str() {
"start" => start_service().map_err(|e| e.into()),
"stop" => stop_service().map_err(|e| e.into()),
@@ -60,32 +60,6 @@ fn main() -> numa::Result<()> {
.block_on(numa::setup_phone::run())
.map_err(|e| e.into());
}
"relay" => {
let port: u16 = std::env::args()
.nth(2)
.as_deref()
.and_then(|s| s.parse().ok())
.unwrap_or(8443);
let bind: std::net::IpAddr = std::env::args()
.nth(3)
.as_deref()
.map(|s| {
s.parse().unwrap_or_else(|e| {
eprintln!("invalid bind address '{}': {}", s, e);
std::process::exit(1);
})
})
.unwrap_or(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST));
let addr = std::net::SocketAddr::new(bind, port);
eprintln!(
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — ODoH relay on {}\n",
addr
);
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()?;
return runtime.block_on(numa::relay::run(addr));
}
"lan" => {
let sub = std::env::args().nth(2).unwrap_or_default();
let config_path = std::env::args()
@@ -117,8 +91,6 @@ fn main() -> numa::Result<()> {
eprintln!(" service status Check if the service is running");
eprintln!(" lan on Enable LAN service discovery (mDNS)");
eprintln!(" lan off Disable LAN service discovery");
eprintln!(" relay [PORT] [BIND]");
eprintln!(" Run as an ODoH relay (RFC 9230, default 127.0.0.1:8443)");
eprintln!(" setup-phone Generate a QR code to install Numa DoT on a phone");
eprintln!(" help Show this help");
eprintln!();
@@ -133,7 +105,7 @@ fn main() -> numa::Result<()> {
&& !arg1.ends_with(".toml")
{
eprintln!(
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — unknown command: \x1b[1m{}\x1b[0m\n",
"\x1b[1;38;5;166mNuma\x1b[0m — unknown command: \x1b[1m{}\x1b[0m\n",
arg1
);
eprintln!("Run \x1b[1mnuma help\x1b[0m for a list of commands.");
@@ -213,7 +185,7 @@ fn print_lan_status(enabled: bool) {
let label = if enabled { "enabled" } else { "disabled" };
let color = if enabled { "32" } else { "33" };
eprintln!(
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — LAN discovery \x1b[{}m{}\x1b[0m",
"\x1b[1;38;5;166mNuma\x1b[0m — LAN discovery \x1b[{}m{}\x1b[0m",
color, label
);
if enabled {

View File

@@ -1,489 +0,0 @@
//! ODoH target-config fetcher and TTL cache (RFC 9230 §6).
//!
//! ## Ciphersuite policy
//! `odoh-rs` deserialization rejects any config whose KEM/KDF/AEAD triple is
//! not the mandatory `(X25519, HKDF-SHA256, AES-128-GCM)` (see
//! `ObliviousDoHConfigContents::deserialize`). This is stricter than the
//! plan's "pick the mandatory suite if mixed": a response containing *any*
//! non-mandatory config fails parse entirely. Real-world targets publish a
//! single mandatory config, so this is fine in practice; revisit if a target
//! that matters starts mixing suites.
use std::sync::Arc;
use std::time::{Duration, Instant};
use arc_swap::ArcSwapOption;
use odoh_rs::{
ObliviousDoHConfigContents, ObliviousDoHConfigs, ObliviousDoHMessage,
ObliviousDoHMessagePlaintext,
};
use rand_core::{OsRng, TryRngCore};
use reqwest::header::HeaderMap;
use tokio::sync::Mutex;
use tokio::time::timeout;
use crate::Result;
/// MIME type used for both directions of the ODoH exchange (RFC 9230 §4).
pub(crate) const ODOH_CONTENT_TYPE: &str = "application/oblivious-dns-message";
/// Cap on the response body we read into memory when the relay returns
/// non-success. Protects against a hostile relay streaming a huge body on
/// the error path; keeps enough room to carry a human-readable reason.
const ERROR_BODY_PREVIEW_BYTES: usize = 1024;
/// Fallback TTL when the target's response lacks a usable `Cache-Control`
/// directive. RFC 9230 §6.2 places no hard floor; 24 h matches what Cloudflare
/// publishes in practice.
const DEFAULT_CONFIG_TTL: Duration = Duration::from_secs(24 * 60 * 60);
/// Cap on any TTL we'll honour, regardless of what the target advertises.
/// Keeps a misconfigured server from pinning an old key indefinitely.
const MAX_CONFIG_TTL: Duration = Duration::from_secs(7 * 24 * 60 * 60);
/// After a failed `/.well-known/odohconfigs` fetch, refuse to refetch again
/// within this window — a target that is genuinely broken would otherwise
/// receive one request per query. Queries that arrive during the backoff
/// return the cached error immediately.
const REFRESH_BACKOFF: Duration = Duration::from_secs(60);
/// Parsed ODoH target config plus the freshness metadata needed to age it out.
#[derive(Debug)]
pub struct OdohTargetConfig {
    pub contents: ObliviousDoHConfigContents,
    // Key identifier derived from `contents.identifier()` in `parse_odoh_config`.
    pub key_id: Vec<u8>,
    // Monotonic deadline after which `is_expired` reports true.
    expires_at: Instant,
}
impl OdohTargetConfig {
    /// True once the config's TTL deadline has been reached (inclusive).
    pub fn is_expired(&self) -> bool {
        self.expires_at <= Instant::now()
    }
}
/// Record of the most recent failed config fetch; drives the
/// [`REFRESH_BACKOFF`] window in `backoff_error`.
struct FailedRefresh {
    // When the fetch failed; compared against REFRESH_BACKOFF.
    at: Instant,
    // Human-readable reason, replayed to queries during the backoff window.
    err: String,
}
/// TTL-gated cache of a single target's HPKE config.
///
/// Reads go through `ArcSwapOption` (lock-free hot path). Refreshes serialize
/// on an async mutex so a burst of simultaneous misses produces a single
/// outbound fetch, and a failed refresh blocks subsequent refetches for
/// [`REFRESH_BACKOFF`] to prevent hot-looping against a broken target.
pub struct OdohConfigCache {
    // Bare hostname of the target this cache serves.
    target_host: String,
    // Derived in `new`: https://<target_host>/.well-known/odohconfigs
    configs_url: String,
    // HTTPS client used for config fetches.
    client: reqwest::Client,
    // Latest good config, if any; None when cold or invalidated.
    current: ArcSwapOption<OdohTargetConfig>,
    // Most recent failed fetch, consulted by `backoff_error`.
    last_failure: ArcSwapOption<FailedRefresh>,
    // Serialises refreshes so concurrent misses fetch only once.
    refresh_lock: Mutex<()>,
}
impl OdohConfigCache {
    /// Build a cold cache for `target_host`, deriving the RFC 9230 §6
    /// well-known configs URL from it.
    pub fn new(target_host: String, client: reqwest::Client) -> Self {
        let configs_url = format!("https://{}/.well-known/odohconfigs", target_host);
        Self {
            target_host,
            configs_url,
            client,
            current: ArcSwapOption::from(None),
            last_failure: ArcSwapOption::from(None),
            refresh_lock: Mutex::new(()),
        }
    }
    /// Bare hostname of the target this cache serves.
    pub fn target_host(&self) -> &str {
        &self.target_host
    }
    /// Return a valid config, refetching when the cache is cold or expired.
    /// Within [`REFRESH_BACKOFF`] of a failed refresh, returns the cached
    /// error without issuing another fetch.
    pub async fn get(&self) -> Result<Arc<OdohTargetConfig>> {
        // Fast path: lock-free read of a still-fresh config.
        if let Some(cfg) = self.current.load_full() {
            if !cfg.is_expired() {
                return Ok(cfg);
            }
        }
        // Fail fast while a recent fetch failure is inside its backoff window.
        if let Some(err) = self.backoff_error() {
            return Err(err);
        }
        let _guard = self.refresh_lock.lock().await;
        // Another task may have refreshed or failed while we waited.
        if let Some(cfg) = self.current.load_full() {
            if !cfg.is_expired() {
                return Ok(cfg);
            }
        }
        if let Some(err) = self.backoff_error() {
            return Err(err);
        }
        match fetch_odoh_config(&self.client, &self.configs_url).await {
            Ok(fresh) => {
                let fresh = Arc::new(fresh);
                self.current.store(Some(fresh.clone()));
                // Clear any stale failure so future misses refetch promptly.
                self.last_failure.store(None);
                Ok(fresh)
            }
            Err(e) => {
                let msg = format!("ODoH config fetch failed: {e}");
                // Arm the backoff window; queries during it get `msg` back.
                self.last_failure.store(Some(Arc::new(FailedRefresh {
                    at: Instant::now(),
                    err: msg.clone(),
                })));
                Err(msg.into())
            }
        }
    }
    /// Drop the cached config. Called after the target rejects ciphertext
    /// (key rotation race) so the next `get()` refetches.
    pub fn invalidate(&self) {
        self.current.store(None);
    }
    /// Some(error) while the last failure is younger than [`REFRESH_BACKOFF`],
    /// None when no failure is recorded or the window has elapsed.
    fn backoff_error(&self) -> Option<crate::Error> {
        let fail = self.last_failure.load_full()?;
        if fail.at.elapsed() < REFRESH_BACKOFF {
            Some(format!("{} (backoff active)", fail.err).into())
        } else {
            None
        }
    }
}
/// Fetch `/.well-known/odohconfigs` from `configs_url` and parse it into an
/// [`OdohTargetConfig`]. The TTL is taken from the response's
/// `Cache-Control: max-age=`, clamped to [`DEFAULT_CONFIG_TTL`,
/// [`MAX_CONFIG_TTL`]] when absent or obviously wrong.
pub async fn fetch_odoh_config(
    client: &reqwest::Client,
    configs_url: &str,
) -> Result<OdohTargetConfig> {
    let response = client.get(configs_url).send().await?.error_for_status()?;
    // Read the TTL off the headers before consuming the body.
    let ttl = cache_control_ttl(response.headers()).unwrap_or(DEFAULT_CONFIG_TTL);
    let raw = response.bytes().await?;
    parse_odoh_config(&raw, ttl)
}
fn parse_odoh_config(body: &[u8], ttl: Duration) -> Result<OdohTargetConfig> {
let mut buf = body;
let configs: ObliviousDoHConfigs = odoh_rs::parse(&mut buf)
.map_err(|e| format!("failed to parse ObliviousDoHConfigs: {e}"))?;
let first = configs
.into_iter()
.next()
.ok_or("target published no ODoH configs with a supported version + ciphersuite")?;
let contents: ObliviousDoHConfigContents = first.into();
let key_id = contents
.identifier()
.map_err(|e| format!("failed to derive key_id from ODoH config: {e}"))?;
Ok(OdohTargetConfig {
contents,
key_id,
expires_at: Instant::now() + ttl.min(MAX_CONFIG_TTL),
})
}
/// Send a DNS wire query through an ODoH relay to a target and return the
/// plaintext DNS wire response.
///
/// Flow: fetch the target's HPKE config (cached), seal the query, POST to the
/// relay with the target carried in `targethost`/`targetpath` URL query
/// parameters, then unseal the response. On a key-rotation signal we
/// invalidate the cache and retry once — this handles the benign race where
/// the target rotated its key between our cached config and the POST.
pub async fn query_through_relay(
    wire: &[u8],
    relay_url: &str,
    target_path: &str,
    client: &reqwest::Client,
    cache: &OdohConfigCache,
    timeout_duration: Duration,
) -> Result<Vec<u8>> {
    let req = OdohRequest {
        wire,
        relay_url,
        target_path,
        client,
        cache,
        timeout: timeout_duration,
    };
    match attempt_query(&req).await {
        // Stale-key signal: drop the cached config and retry exactly once.
        Err(AttemptError::KeyRotation(_)) => {
            cache.invalidate();
            attempt_query(&req).await.map_err(AttemptError::into_error)
        }
        outcome => outcome.map_err(AttemptError::into_error),
    }
}
struct OdohRequest<'a> {
wire: &'a [u8],
relay_url: &'a str,
target_path: &'a str,
client: &'a reqwest::Client,
cache: &'a OdohConfigCache,
timeout: Duration,
}
/// Classification used only by the retry path in [`query_through_relay`].
enum AttemptError {
/// Target signalled the config we used is stale (key rotation race).
/// Callers should invalidate the cache and retry exactly once.
KeyRotation(String),
/// Any other failure — transport, timeout, malformed response.
Other(crate::Error),
}
impl AttemptError {
fn into_error(self) -> crate::Error {
match self {
AttemptError::KeyRotation(m) => format!("ODoH key rotation race: {m}").into(),
AttemptError::Other(e) => e,
}
}
}
async fn attempt_query(req: &OdohRequest<'_>) -> std::result::Result<Vec<u8>, AttemptError> {
let cfg = req.cache.get().await.map_err(AttemptError::Other)?;
let plaintext = ObliviousDoHMessagePlaintext::new(req.wire, 0);
// rand_core 0.9's OsRng is fallible-only; wrap for the infallible bound.
let mut os = OsRng;
let mut rng = os.unwrap_mut();
let (encrypted_query, client_secret) =
odoh_rs::encrypt_query(&plaintext, &cfg.contents, &mut rng)
.map_err(|e| AttemptError::Other(format!("ODoH encrypt failed: {e}").into()))?;
let body = odoh_rs::compose(&encrypted_query)
.map_err(|e| AttemptError::Other(format!("ODoH compose failed: {e}").into()))?
.freeze();
// RFC 9230 §5 and the reference client use URL query parameters, not
// HTTP headers, to carry the target routing. `Targethost`/`Targetpath`
// headers cause relays to treat the request as an unspecified-target and
// reject it.
let (status, resp_body) = timeout(req.timeout, async {
let resp = req
.client
.post(req.relay_url)
.header(reqwest::header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.header(reqwest::header::ACCEPT, ODOH_CONTENT_TYPE)
.header(reqwest::header::CACHE_CONTROL, "no-cache, no-store")
.query(&[
("targethost", req.cache.target_host()),
("targetpath", req.target_path),
])
.body(body)
.send()
.await?;
let status = resp.status();
let body = resp.bytes().await?;
Ok::<_, reqwest::Error>((status, body))
})
.await
.map_err(|_| AttemptError::Other("ODoH relay request timed out".into()))?
.map_err(|e| AttemptError::Other(format!("ODoH relay request failed: {e}").into()))?;
// RFC 9230 §4.3 expects a target that can't decrypt to reply with a DNS
// error in a sealed 200 response; a 401 from the relay/target is the
// practical signal that our cached HPKE key is stale. Treat 400 as a
// client-side bug (malformed ODoH envelope) — retrying would loop-fail.
if !status.is_success() {
let preview_len = resp_body.len().min(ERROR_BODY_PREVIEW_BYTES);
let body_preview = String::from_utf8_lossy(&resp_body[..preview_len]);
let msg = format!("ODoH relay returned {status}: {}", body_preview.trim());
return Err(if status.as_u16() == 401 {
AttemptError::KeyRotation(msg)
} else {
AttemptError::Other(msg.into())
});
}
let mut buf = resp_body;
let encrypted_response: ObliviousDoHMessage = odoh_rs::parse(&mut buf)
.map_err(|e| AttemptError::Other(format!("ODoH response parse failed: {e}").into()))?;
let plaintext_response =
odoh_rs::decrypt_response(&plaintext, &encrypted_response, client_secret)
.map_err(|e| AttemptError::KeyRotation(format!("ODoH decrypt failed: {e}")))?;
Ok(plaintext_response.into_msg().to_vec())
}
/// Extract the first positive `max-age` value from a `Cache-Control` header,
/// if one is present and parses as an integer number of seconds.
fn cache_control_ttl(headers: &HeaderMap) -> Option<Duration> {
    let value = headers.get(reqwest::header::CACHE_CONTROL)?.to_str().ok()?;
    value
        .split(',')
        .filter_map(|directive| directive.trim().strip_prefix("max-age="))
        .filter_map(|seconds| seconds.trim().parse::<u64>().ok())
        .find(|&seconds| seconds > 0)
        .map(Duration::from_secs)
}
#[cfg(test)]
mod tests {
use super::*;
use odoh_rs::{ObliviousDoHConfig, ObliviousDoHKeyPair};
// RFC 9180 HPKE IDs for the sole ODoH mandatory suite:
// KEM = X25519, KDF = HKDF-SHA256, AEAD = AES-128-GCM.
const KEM_X25519: u16 = 0x0020;
const KDF_SHA256: u16 = 0x0001;
const AEAD_AES128GCM: u16 = 0x0001;
fn synth_configs_bytes() -> Vec<u8> {
let kp = ObliviousDoHKeyPair::from_parameters(
KEM_X25519,
KDF_SHA256,
AEAD_AES128GCM,
&[0u8; 32],
);
let pk = kp.public().clone();
let configs: ObliviousDoHConfigs = vec![ObliviousDoHConfig::from(pk)].into();
odoh_rs::compose(&configs).unwrap().to_vec()
}
#[test]
fn parse_accepts_well_formed_config() {
let bytes = synth_configs_bytes();
let cfg = parse_odoh_config(&bytes, Duration::from_secs(3600)).unwrap();
assert!(!cfg.key_id.is_empty());
assert!(!cfg.is_expired());
}
#[test]
fn parse_rejects_garbage() {
let bytes = [0xffu8; 16];
assert!(parse_odoh_config(&bytes, Duration::from_secs(3600)).is_err());
}
#[test]
fn parse_rejects_empty() {
assert!(parse_odoh_config(&[], Duration::from_secs(3600)).is_err());
}
#[test]
fn ttl_capped_at_max() {
let bytes = synth_configs_bytes();
let cfg = parse_odoh_config(&bytes, Duration::from_secs(100 * 24 * 60 * 60)).unwrap();
let remaining = cfg.expires_at.saturating_duration_since(Instant::now());
assert!(remaining <= MAX_CONFIG_TTL);
assert!(remaining >= MAX_CONFIG_TTL - Duration::from_secs(1));
}
#[test]
fn cache_control_parses_max_age() {
let mut h = HeaderMap::new();
h.insert("cache-control", "public, max-age=86400".parse().unwrap());
assert_eq!(cache_control_ttl(&h), Some(Duration::from_secs(86400)));
}
#[test]
fn cache_control_ignores_max_age_zero() {
let mut h = HeaderMap::new();
h.insert("cache-control", "max-age=0, no-store".parse().unwrap());
assert_eq!(cache_control_ttl(&h), None);
}
#[test]
fn cache_control_missing_falls_back() {
let h = HeaderMap::new();
assert_eq!(cache_control_ttl(&h), None);
}
#[test]
fn is_expired_tracks_ttl() {
let bytes = synth_configs_bytes();
let mut cfg = parse_odoh_config(&bytes, Duration::from_secs(3600)).unwrap();
assert!(!cfg.is_expired());
cfg.expires_at = Instant::now() - Duration::from_secs(1);
assert!(cfg.is_expired());
}
#[tokio::test]
async fn cache_backoff_blocks_refetch_after_failure() {
// Point the cache at a host that does not exist so the fetch fails
// deterministically; this exercises the backoff wiring without a
// network round-trip succeeding.
let cache = OdohConfigCache::new(
"odoh-target.invalid".to_string(),
reqwest::Client::builder()
.timeout(Duration::from_millis(200))
.build()
.unwrap(),
);
let first = cache.get().await;
assert!(first.is_err(), "first fetch must fail against invalid host");
// Within the backoff window, the cached error is returned immediately.
let second = cache.get().await.unwrap_err().to_string();
assert!(
second.contains("backoff active"),
"expected backoff hint, got: {second}"
);
// Reaching past the backoff window allows a fresh attempt — simulate
// by rewinding the recorded failure timestamp.
cache.last_failure.store(Some(Arc::new(FailedRefresh {
at: Instant::now() - (REFRESH_BACKOFF + Duration::from_secs(1)),
err: "prior".to_string(),
})));
let third = cache.get().await.unwrap_err().to_string();
assert!(
!third.contains("backoff active"),
"expected fresh fetch attempt, got: {third}"
);
}
/// Round-trip the HPKE seal/unseal path in isolation from HTTP, using the
/// odoh-rs primitives that `query_through_relay` wires together. Guards
/// against silently breaking the crypto glue if we refactor that path.
#[test]
fn seal_unseal_round_trip() {
use odoh_rs::{decrypt_query, encrypt_response, ResponseNonce};
let kp = ObliviousDoHKeyPair::from_parameters(
KEM_X25519,
KDF_SHA256,
AEAD_AES128GCM,
&[0u8; 32],
);
let query_wire = b"\x12\x34\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x01\x00\x01";
let query_pt = ObliviousDoHMessagePlaintext::new(query_wire, 0);
let mut os = OsRng;
let mut rng = os.unwrap_mut();
let (query_enc, client_secret) =
odoh_rs::encrypt_query(&query_pt, kp.public(), &mut rng).unwrap();
let (query_back, server_secret) = decrypt_query(&query_enc, &kp).unwrap();
assert_eq!(query_back.into_msg().as_ref(), query_wire);
let response_wire = b"\x12\x34\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00";
let response_pt = ObliviousDoHMessagePlaintext::new(response_wire, 0);
let response_enc = encrypt_response(
&query_pt,
&response_pt,
server_secret,
ResponseNonce::default(),
)
.unwrap();
let response_back =
odoh_rs::decrypt_response(&query_pt, &response_enc, client_secret).unwrap();
assert_eq!(response_back.into_msg().as_ref(), response_wire);
}
}

View File

@@ -85,14 +85,6 @@ impl DnsPacket {
+ self.edns.as_ref().map_or(0, |e| e.options.capacity())
}
/// Apply `f` to every record in the three RR sections (answers,
/// authorities, resources). Does not touch questions or edns.
pub fn for_each_record_mut(&mut self, mut f: impl FnMut(&mut DnsRecord)) {
    for section in [
        &mut self.answers,
        &mut self.authorities,
        &mut self.resources,
    ] {
        for record in section.iter_mut() {
            f(record);
        }
    }
}
pub fn response_from(query: &DnsPacket, rescode: crate::header::ResultCode) -> DnsPacket {
let mut resp = DnsPacket::new();
resp.header.id = query.header.id;

View File

@@ -1,66 +1,114 @@
use crate::buffer::BytePacketBuffer;
use crate::Result;
macro_rules! define_qtypes {
( $( $variant:ident = $num:literal, $str:literal ),* $(,)? ) => {
#[derive(PartialEq, Eq, Debug, Clone, Hash, Copy)]
pub enum QueryType {
UNKNOWN(u16),
$( $variant, )*
}
impl QueryType {
pub fn to_num(&self) -> u16 {
match *self {
QueryType::UNKNOWN(x) => x,
$( QueryType::$variant => $num, )*
}
}
pub fn from_num(num: u16) -> QueryType {
match num {
$( $num => QueryType::$variant, )*
_ => QueryType::UNKNOWN(num),
}
}
pub fn as_str(&self) -> &'static str {
match self {
QueryType::UNKNOWN(_) => "UNKNOWN",
$( QueryType::$variant => $str, )*
}
}
pub fn parse_str(s: &str) -> Option<QueryType> {
match s.to_ascii_uppercase().as_str() {
$( $str => Some(QueryType::$variant), )*
_ => None,
}
}
}
};
#[derive(PartialEq, Eq, Debug, Clone, Hash, Copy)]
pub enum QueryType {
UNKNOWN(u16),
A, // 1
NS, // 2
CNAME, // 5
SOA, // 6
PTR, // 12
MX, // 15
TXT, // 16
AAAA, // 28
SRV, // 33
DS, // 43
RRSIG, // 46
NSEC, // 47
DNSKEY, // 48
NSEC3, // 50
OPT, // 41 (EDNS0 pseudo-type)
HTTPS, // 65
}
define_qtypes! {
A = 1, "A",
NS = 2, "NS",
CNAME = 5, "CNAME",
SOA = 6, "SOA",
PTR = 12, "PTR",
MX = 15, "MX",
TXT = 16, "TXT",
AAAA = 28, "AAAA",
LOC = 29, "LOC",
SRV = 33, "SRV",
NAPTR = 35, "NAPTR",
OPT = 41, "OPT",
DS = 43, "DS",
RRSIG = 46, "RRSIG",
NSEC = 47, "NSEC",
DNSKEY = 48, "DNSKEY",
NSEC3 = 50, "NSEC3",
SVCB = 64, "SVCB",
HTTPS = 65, "HTTPS",
impl QueryType {
pub fn to_num(&self) -> u16 {
match *self {
QueryType::UNKNOWN(x) => x,
QueryType::A => 1,
QueryType::NS => 2,
QueryType::CNAME => 5,
QueryType::SOA => 6,
QueryType::PTR => 12,
QueryType::MX => 15,
QueryType::TXT => 16,
QueryType::AAAA => 28,
QueryType::SRV => 33,
QueryType::OPT => 41,
QueryType::DS => 43,
QueryType::RRSIG => 46,
QueryType::NSEC => 47,
QueryType::DNSKEY => 48,
QueryType::NSEC3 => 50,
QueryType::HTTPS => 65,
}
}
pub fn from_num(num: u16) -> QueryType {
match num {
1 => QueryType::A,
2 => QueryType::NS,
5 => QueryType::CNAME,
6 => QueryType::SOA,
12 => QueryType::PTR,
15 => QueryType::MX,
16 => QueryType::TXT,
28 => QueryType::AAAA,
33 => QueryType::SRV,
41 => QueryType::OPT,
43 => QueryType::DS,
46 => QueryType::RRSIG,
47 => QueryType::NSEC,
48 => QueryType::DNSKEY,
50 => QueryType::NSEC3,
65 => QueryType::HTTPS,
_ => QueryType::UNKNOWN(num),
}
}
pub fn as_str(&self) -> &'static str {
match self {
QueryType::A => "A",
QueryType::NS => "NS",
QueryType::CNAME => "CNAME",
QueryType::SOA => "SOA",
QueryType::PTR => "PTR",
QueryType::MX => "MX",
QueryType::TXT => "TXT",
QueryType::AAAA => "AAAA",
QueryType::SRV => "SRV",
QueryType::OPT => "OPT",
QueryType::DS => "DS",
QueryType::RRSIG => "RRSIG",
QueryType::NSEC => "NSEC",
QueryType::DNSKEY => "DNSKEY",
QueryType::NSEC3 => "NSEC3",
QueryType::HTTPS => "HTTPS",
QueryType::UNKNOWN(_) => "UNKNOWN",
}
}
pub fn parse_str(s: &str) -> Option<QueryType> {
match s.to_ascii_uppercase().as_str() {
"A" => Some(QueryType::A),
"NS" => Some(QueryType::NS),
"CNAME" => Some(QueryType::CNAME),
"SOA" => Some(QueryType::SOA),
"PTR" => Some(QueryType::PTR),
"MX" => Some(QueryType::MX),
"TXT" => Some(QueryType::TXT),
"AAAA" => Some(QueryType::AAAA),
"SRV" => Some(QueryType::SRV),
"DS" => Some(QueryType::DS),
"RRSIG" => Some(QueryType::RRSIG),
"DNSKEY" => Some(QueryType::DNSKEY),
"NSEC" => Some(QueryType::NSEC),
"NSEC3" => Some(QueryType::NSEC3),
"HTTPS" => Some(QueryType::HTTPS),
_ => None,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]

View File

@@ -1,342 +0,0 @@
//! ODoH relay (RFC 9230 §5) — the forward-without-reading half of the
//! protocol. Runs `numa relay`; skips all resolver initialisation (no port
//! 53, no cache, no recursion, no dashboard). The relay never reads the
//! HPKE-sealed payload and keeps no per-request logs — only aggregate
//! counters.
use std::net::SocketAddr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use axum::body::Bytes;
use axum::extract::{DefaultBodyLimit, Query, State};
use axum::http::{header, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::Router;
use log::{error, info};
use serde::Deserialize;
use tokio::net::TcpListener;
use crate::forward::build_https_client_with_pool;
use crate::odoh::ODOH_CONTENT_TYPE;
use crate::Result;
/// Cap on the opaque body we accept from a client. ODoH envelopes are
/// ~100–300 bytes in practice; anything larger is malformed or hostile.
const MAX_BODY_BYTES: usize = 4 * 1024;
/// Cap on the body we read back from the target before streaming to client.
/// Slightly larger: target responses carry DNS answers plus HPKE overhead.
const MAX_TARGET_RESPONSE_BYTES: usize = 8 * 1024;
/// Covers the whole client-to-target round trip — not just `.send()` — so a
/// slow-drip target can't hang a worker indefinitely after headers arrive.
const TARGET_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
/// The relay hits many distinct target hosts on behalf of clients. A
/// per-host idle pool of 4 keeps warm TLS connections available for concurrent
/// fan-out without blowing up memory on a small VPS.
const RELAY_POOL_PER_HOST: usize = 4;
/// URL query parameters carried on `/relay` requests, naming the ODoH target
/// to forward to (validated in `handle_relay`).
#[derive(Deserialize)]
struct RelayParams {
    // Bare hostname of the ODoH target; checked by is_valid_hostname.
    targethost: String,
    // Path on the target; handle_relay requires a leading '/'.
    targetpath: String,
}
/// Shared relay state: the outbound HTTPS client plus aggregate counters.
/// Per the module contract, counters are the only telemetry kept — no
/// per-request logs.
struct RelayState {
    // Warm connection pool used for all forwards to targets.
    client: reqwest::Client,
    // Every request that reached /relay.
    total_requests: AtomicU64,
    // Target round trips that succeeded.
    forwarded_ok: AtomicU64,
    // Target round trips that failed (timeout, transport, oversized body).
    forwarded_err: AtomicU64,
    // Requests rejected by validation before any forward was attempted.
    rejected_bad_request: AtomicU64,
}
impl RelayState {
    /// Fresh state with zeroed counters and a pool sized for fan-out to
    /// many distinct target hosts (see RELAY_POOL_PER_HOST).
    fn new() -> Arc<Self> {
        Arc::new(RelayState {
            client: build_https_client_with_pool(RELAY_POOL_PER_HOST),
            total_requests: AtomicU64::new(0),
            forwarded_ok: AtomicU64::new(0),
            forwarded_err: AtomicU64::new(0),
            rejected_bad_request: AtomicU64::new(0),
        })
    }
}
/// Assemble the relay router. `DefaultBodyLimit` overrides axum's 2 MiB
/// default so hostile clients can't force the relay to buffer multi-MB
/// bodies before our own cap.
fn build_app(state: Arc<RelayState>) -> Router {
    let relay_route = Router::new()
        .route("/relay", post(handle_relay))
        .layer(DefaultBodyLimit::max(MAX_BODY_BYTES));
    relay_route
        .route("/health", get(handle_health))
        .with_state(state)
}
/// Bind `addr` and serve the relay until the server future completes.
pub async fn run(addr: SocketAddr) -> Result<()> {
    let listener = TcpListener::bind(addr).await?;
    let router = build_app(RelayState::new());
    info!("ODoH relay listening on {}", addr);
    axum::serve(listener, router).await?;
    Ok(())
}
/// GET /health — plain-text liveness probe plus the aggregate counters.
async fn handle_health(State(state): State<Arc<RelayState>>) -> impl IntoResponse {
    let total = state.total_requests.load(Ordering::Relaxed);
    let ok = state.forwarded_ok.load(Ordering::Relaxed);
    let err = state.forwarded_err.load(Ordering::Relaxed);
    let rejected = state.rejected_bad_request.load(Ordering::Relaxed);
    let body = format!(
        "ok\ntotal {}\nforwarded_ok {}\nforwarded_err {}\nrejected_bad_request {}\n",
        total, ok, err, rejected,
    );
    (
        StatusCode::OK,
        [(header::CONTENT_TYPE, "text/plain; charset=utf-8")],
        body,
    )
}
/// POST /relay — validate, then forward the opaque ODoH envelope to the
/// requested target. The body is never inspected beyond its length.
async fn handle_relay(
    State(state): State<Arc<RelayState>>,
    Query(params): Query<RelayParams>,
    headers: axum::http::HeaderMap,
    body: Bytes,
) -> Response {
    state.total_requests.fetch_add(1, Ordering::Relaxed);
    // Reject anything that does not declare the ODoH media type.
    if !content_type_matches(&headers, ODOH_CONTENT_TYPE) {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (
            StatusCode::UNSUPPORTED_MEDIA_TYPE,
            "expected application/oblivious-dns-message",
        )
            .into_response();
    }
    // NOTE(review): DefaultBodyLimit in build_app should reject oversized
    // bodies before this handler runs; this re-check is belt-and-braces.
    if body.len() > MAX_BODY_BYTES {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (StatusCode::PAYLOAD_TOO_LARGE, "body exceeds 4 KiB cap").into_response();
    }
    // SSRF guard: strict hostname syntax and an absolute target path.
    if !is_valid_hostname(&params.targethost) || !params.targetpath.starts_with('/') {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (StatusCode::BAD_REQUEST, "invalid targethost or targetpath").into_response();
    }
    let target_url = format!("https://{}{}", params.targethost, params.targetpath);
    match forward_to_target(&state.client, &target_url, body).await {
        Ok((status, resp_body)) => {
            state.forwarded_ok.fetch_add(1, Ordering::Relaxed);
            // Relay the target's status and sealed body back verbatim.
            (
                status,
                [(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)],
                resp_body,
            )
                .into_response()
        }
        Err(e) => {
            // Log the underlying reason for operators; don't leak reqwest
            // internals (which can reveal the target's TLS config, IP, etc.)
            // back to arbitrary clients.
            error!("relay forward to {} failed: {}", target_url, e);
            state.forwarded_err.fetch_add(1, Ordering::Relaxed);
            (StatusCode::BAD_GATEWAY, "target unreachable").into_response()
        }
    }
}
async fn forward_to_target(
client: &reqwest::Client,
url: &str,
body: Bytes,
) -> Result<(StatusCode, Bytes)> {
let response = tokio::time::timeout(TARGET_REQUEST_TIMEOUT, async {
let resp = client
.post(url)
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.header(header::ACCEPT, ODOH_CONTENT_TYPE)
.body(body)
.send()
.await?;
let status = StatusCode::from_u16(resp.status().as_u16())?;
let resp_body = resp.bytes().await?;
Ok::<_, crate::Error>((status, resp_body))
})
.await
.map_err(|_| "timed out talking to target")??;
if response.1.len() > MAX_TARGET_RESPONSE_BYTES {
return Err("target response exceeds cap".into());
}
Ok(response)
}
/// True when the request's `Content-Type` (ignoring any `;`-separated
/// parameters such as charset) equals `expected`.
fn content_type_matches(headers: &axum::http::HeaderMap, expected: &str) -> bool {
    match headers
        .get(header::CONTENT_TYPE)
        .and_then(|value| value.to_str().ok())
    {
        Some(raw) => {
            let mime = raw.split(';').next().unwrap_or("").trim();
            mime == expected
        }
        None => false,
    }
}
/// Strict DNS-hostname validator, aimed at closing the SSRF surface a naive
/// `contains('.')` check leaves open (e.g. `example.com@internal.host`,
/// `evil.com/../admin`). Requires ASCII letters/digits/dot/dash, at least
/// one dot, total length ≤ 253 per RFC 1035, and — fixing gaps in the
/// previous whole-string check — validates each label: non-empty (rejects
/// `a..com` and leading/trailing dots), ≤ 63 chars, and no leading or
/// trailing hyphen (rejects `a-.com`, `a.-b.com`).
fn is_valid_hostname(h: &str) -> bool {
    if h.is_empty() || h.len() > 253 || !h.contains('.') {
        return false;
    }
    h.split('.').all(|label| {
        !label.is_empty()
            && label.len() <= 63
            && !label.starts_with('-')
            && !label.ends_with('-')
            && label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
    })
}
#[cfg(test)]
mod tests {
use super::*;
/// Start a relay on an ephemeral localhost port for tests; returns the bound
/// address and the shared state so tests can inspect the counters.
async fn spawn_relay() -> (SocketAddr, Arc<RelayState>) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let bound = listener.local_addr().unwrap();
    let shared = RelayState::new();
    let router = build_app(shared.clone());
    tokio::spawn(async move {
        let _ = axum::serve(listener, router).await;
    });
    (bound, shared)
}
#[tokio::test]
async fn rejects_missing_content_type() {
let (addr, state) = spawn_relay().await;
let client = reqwest::Client::new();
let resp = client
.post(format!(
"http://{}/relay?targethost=odoh.example.com&targetpath=/dns-query",
addr
))
.body("body")
.send()
.await
.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::UNSUPPORTED_MEDIA_TYPE);
assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
}
#[tokio::test]
async fn rejects_oversized_body() {
let (addr, _state) = spawn_relay().await;
let big = vec![0u8; MAX_BODY_BYTES + 1];
let client = reqwest::Client::new();
let resp = client
.post(format!(
"http://{}/relay?targethost=odoh.example.com&targetpath=/dns-query",
addr
))
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.body(big)
.send()
.await
.unwrap();
// axum's DefaultBodyLimit rejects before our handler runs, so the
// counter doesn't increment — but the status code proves the layer
// enforced the cap. Either status is acceptable evidence.
assert!(matches!(
resp.status(),
reqwest::StatusCode::PAYLOAD_TOO_LARGE | reqwest::StatusCode::BAD_REQUEST
));
}
#[tokio::test]
async fn rejects_targethost_without_dot() {
let (addr, state) = spawn_relay().await;
let client = reqwest::Client::new();
let resp = client
.post(format!(
"http://{}/relay?targethost=localhost&targetpath=/dns-query",
addr
))
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.body("body")
.send()
.await
.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
}
#[tokio::test]
async fn rejects_userinfo_ssrf_attempt() {
let (addr, state) = spawn_relay().await;
let client = reqwest::Client::new();
// The naive contains('.') check would let this through and reqwest
// would route to `internal.host` using `evil.com` as userinfo.
let resp = client
.post(format!(
"http://{}/relay?targethost=evil.com@internal.host&targetpath=/dns-query",
addr
))
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.body("body")
.send()
.await
.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
}
#[tokio::test]
async fn rejects_targetpath_without_leading_slash() {
let (addr, state) = spawn_relay().await;
let client = reqwest::Client::new();
let resp = client
.post(format!(
"http://{}/relay?targethost=odoh.example.com&targetpath=dns-query",
addr
))
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
.body("body")
.send()
.await
.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
}
#[tokio::test]
async fn health_endpoint_reports_counters() {
let (addr, _state) = spawn_relay().await;
let client = reqwest::Client::new();
let resp = client
.get(format!("http://{}/health", addr))
.send()
.await
.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::OK);
let body = resp.text().await.unwrap();
assert!(body.contains("ok\n"));
assert!(body.contains("forwarded_ok 0"));
}
#[test]
fn hostname_validator_accepts_and_rejects() {
assert!(is_valid_hostname("odoh.cloudflare-dns.com"));
assert!(is_valid_hostname("a.b"));
assert!(!is_valid_hostname(""));
assert!(!is_valid_hostname("localhost"));
assert!(!is_valid_hostname(".leading.dot"));
assert!(!is_valid_hostname("trailing.dot."));
assert!(!is_valid_hostname("-leading.dash"));
assert!(!is_valid_hostname("evil.com@internal.host"));
assert!(!is_valid_hostname("evil.com/../admin"));
assert!(!is_valid_hostname(&"a".repeat(254)));
}
}

View File

@@ -17,10 +17,7 @@ use crate::buffer::BytePacketBuffer;
use crate::cache::DnsCache;
use crate::config::{build_zone_map, load_config, ConfigLoad};
use crate::ctx::{handle_query, ServerCtx};
use crate::forward::{
build_https_client, build_odoh_client, parse_upstream_list, Upstream, UpstreamPool,
};
use crate::odoh::OdohConfigCache;
use crate::forward::{parse_upstream, Upstream, UpstreamPool};
use crate::override_store::OverrideStore;
use crate::query_log::QueryLog;
use crate::service_store::ServiceStore;
@@ -57,7 +54,10 @@ pub async fn run(config_path: String) -> crate::Result<()> {
(crate::config::UpstreamMode::Recursive, false, pool, label)
} else {
log::warn!("recursive probe failed — falling back to Quad9 DoH");
let client = build_https_client();
let client = reqwest::Client::builder()
.use_rustls_tls()
.build()
.unwrap_or_default();
let url = DOH_FALLBACK.to_string();
let label = url.clone();
let pool = UpstreamPool::new(vec![Upstream::Doh { url, client }], vec![]);
@@ -82,8 +82,16 @@ pub async fn run(config_path: String) -> crate::Result<()> {
config.upstream.address.clone()
};
let primary = parse_upstream_list(&addrs, config.upstream.port)?;
let fallback = parse_upstream_list(&config.upstream.fallback, config.upstream.port)?;
let primary: Vec<Upstream> = addrs
.iter()
.map(|s| parse_upstream(s, config.upstream.port))
.collect::<crate::Result<Vec<_>>>()?;
let fallback: Vec<Upstream> = config
.upstream
.fallback
.iter()
.map(|s| parse_upstream(s, config.upstream.port))
.collect::<crate::Result<Vec<_>>>()?;
let pool = UpstreamPool::new(primary, fallback);
let label = pool.label();
@@ -94,28 +102,6 @@ pub async fn run(config_path: String) -> crate::Result<()> {
label,
)
}
crate::config::UpstreamMode::Odoh => {
let odoh = config.upstream.odoh_upstream()?;
let client = build_odoh_client(&odoh);
let target_config = Arc::new(OdohConfigCache::new(
odoh.target_host.clone(),
client.clone(),
));
let primary = vec![Upstream::Odoh {
relay_url: odoh.relay_url,
target_path: odoh.target_path,
client,
target_config,
}];
let fallback = if odoh.strict {
Vec::new()
} else {
parse_upstream_list(&config.upstream.fallback, config.upstream.port)?
};
let pool = UpstreamPool::new(primary, fallback);
let label = pool.label();
(crate::config::UpstreamMode::Odoh, false, pool, label)
}
};
let api_port = config.server.api_port;
@@ -137,11 +123,7 @@ pub async fn run(config_path: String) -> crate::Result<()> {
for fwd in &config.forwarding {
for suffix in &fwd.suffix {
info!(
"forwarding .{} to {} (config rule)",
suffix,
fwd.upstream.join(", ")
);
info!("forwarding .{} to {} (config rule)", suffix, fwd.upstream);
}
}
let forwarding_rules =
@@ -227,7 +209,7 @@ pub async fn run(config_path: String) -> crate::Result<()> {
upstream_port: config.upstream.port,
lan_ip: Mutex::new(crate::lan::detect_lan_ip().unwrap_or(std::net::Ipv4Addr::LOCALHOST)),
timeout: Duration::from_millis(config.upstream.timeout_ms),
hedge_delay: resolved_mode.hedge_delay(config.upstream.hedge_ms),
hedge_delay: Duration::from_millis(config.upstream.hedge_ms),
proxy_tld_suffix: if config.proxy.tld.is_empty() {
String::new()
} else {
@@ -250,7 +232,6 @@ pub async fn run(config_path: String) -> crate::Result<()> {
ca_pem,
mobile_enabled: config.mobile.enabled,
mobile_port: config.mobile.port,
filter_aaaa: config.server.filter_aaaa,
});
let zone_count: usize = ctx.zone_map.values().map(|m| m.len()).sum();
@@ -294,12 +275,12 @@ pub async fn run(config_path: String) -> crate::Result<()> {
.unwrap_or(30);
let w = (val_w + 12).max(42); // 10 label + 2 padding, min 42 for title
let o = "\x1b[38;2;192;98;58m"; // orange
let g = "\x1b[38;2;107;124;78m"; // green
let d = "\x1b[38;2;163;152;136m"; // dim
let o = "\x1b[38;5;166m"; // orange borders (256-color, ~192,98,58)
let g = "\x1b[38;5;101m"; // khaki/olive labels (256-color, ~107,124,78)
let d = "\x1b[38;5;138m"; // warm grey labels (256-color, ~163,152,136)
let r = "\x1b[0m"; // reset
let b = "\x1b[1;38;2;192;98;58m"; // bold orange
let it = "\x1b[3;38;2;163;152;136m"; // italic dim
let b = "\x1b[1;38;5;166m"; // bold orange title (256-color)
let it = "\x1b[3;38;5;138m"; // italic warm grey subtitle
let bar_top = "".repeat(w);
let bar_mid = "".repeat(w);
@@ -357,7 +338,7 @@ pub async fn run(config_path: String) -> crate::Result<()> {
if let Some(ref label) = proxy_label {
row("Proxy", g, label);
if config.proxy.bind_addr == "127.0.0.1" {
let y = "\x1b[38;2;204;176;59m"; // yellow
let y = "\x1b[33m"; // yellow
row(
"",
y,

View File

@@ -60,7 +60,7 @@ pub async fn run() -> Result<(), String> {
if !api_reachable {
eprintln!();
eprintln!(
" \x1b[1;38;2;192;98;58mNuma\x1b[0m — mobile API is not reachable on port {}.",
" \x1b[1;38;5;166mNuma\x1b[0m — mobile API is not reachable on port {}.",
SETUP_PORT
);
eprintln!();
@@ -77,7 +77,7 @@ pub async fn run() -> Result<(), String> {
let qr = render_qr(&url)?;
eprintln!();
eprintln!(" \x1b[1;38;2;192;98;58mNuma Phone Setup\x1b[0m");
eprintln!(" \x1b[1;38;5;166mNuma Phone Setup\x1b[0m");
eprintln!();
eprintln!(" Profile URL: \x1b[36m{}\x1b[0m", url);
eprintln!();

View File

@@ -102,10 +102,6 @@ pub struct ServerStats {
transport_tcp: u64,
transport_dot: u64,
transport_doh: u64,
upstream_transport_udp: u64,
upstream_transport_doh: u64,
upstream_transport_dot: u64,
upstream_transport_odoh: u64,
started_at: Instant,
}
@@ -128,31 +124,6 @@ impl Transport {
}
}
/// Wire protocol used for a forwarded upstream call. Orthogonal to
/// `QueryPath`: the path answers "where the answer came from"; this answers
/// "over what wire we spoke to the forwarder." Callers pass
/// `Option<UpstreamTransport>` — `None` for resolutions that never touched
/// a forwarder (cache/local/blocked) or for recursive mode, which has its
/// own counter via `QueryPath::Recursive`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UpstreamTransport {
Udp,
Doh,
Dot,
Odoh,
}
impl UpstreamTransport {
pub fn as_str(&self) -> &'static str {
match self {
UpstreamTransport::Udp => "UDP",
UpstreamTransport::Doh => "DOH",
UpstreamTransport::Dot => "DOT",
UpstreamTransport::Odoh => "ODOH",
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum QueryPath {
Local,
@@ -231,20 +202,11 @@ impl ServerStats {
transport_tcp: 0,
transport_dot: 0,
transport_doh: 0,
upstream_transport_udp: 0,
upstream_transport_doh: 0,
upstream_transport_dot: 0,
upstream_transport_odoh: 0,
started_at: Instant::now(),
}
}
pub fn record(
&mut self,
path: QueryPath,
transport: Transport,
upstream_transport: Option<UpstreamTransport>,
) -> u64 {
pub fn record(&mut self, path: QueryPath, transport: Transport) -> u64 {
self.queries_total += 1;
match path {
QueryPath::Local => self.queries_local += 1,
@@ -263,14 +225,6 @@ impl ServerStats {
Transport::Dot => self.transport_dot += 1,
Transport::Doh => self.transport_doh += 1,
}
if let Some(ut) = upstream_transport {
match ut {
UpstreamTransport::Udp => self.upstream_transport_udp += 1,
UpstreamTransport::Doh => self.upstream_transport_doh += 1,
UpstreamTransport::Dot => self.upstream_transport_dot += 1,
UpstreamTransport::Odoh => self.upstream_transport_odoh += 1,
}
}
self.queries_total
}
@@ -299,10 +253,6 @@ impl ServerStats {
transport_tcp: self.transport_tcp,
transport_dot: self.transport_dot,
transport_doh: self.transport_doh,
upstream_transport_udp: self.upstream_transport_udp,
upstream_transport_doh: self.upstream_transport_doh,
upstream_transport_dot: self.upstream_transport_dot,
upstream_transport_odoh: self.upstream_transport_odoh,
}
}
@@ -313,7 +263,7 @@ impl ServerStats {
let secs = uptime.as_secs() % 60;
log::info!(
"STATS | uptime {}h{}m{}s | total {} | fwd {} | upstream {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {} | up-udp {} | up-doh {} | up-dot {} | up-odoh {}",
"STATS | uptime {}h{}m{}s | total {} | fwd {} | upstream {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {}",
hours, mins, secs,
self.queries_total,
self.queries_forwarded,
@@ -325,10 +275,6 @@ impl ServerStats {
self.queries_overridden,
self.queries_blocked,
self.upstream_errors,
self.upstream_transport_udp,
self.upstream_transport_doh,
self.upstream_transport_dot,
self.upstream_transport_odoh,
);
}
}
@@ -349,8 +295,4 @@ pub struct StatsSnapshot {
pub transport_tcp: u64,
pub transport_dot: u64,
pub transport_doh: u64,
pub upstream_transport_udp: u64,
pub upstream_transport_doh: u64,
pub upstream_transport_dot: u64,
pub upstream_transport_odoh: u64,
}

View File

@@ -1,179 +0,0 @@
//! Minimal SVCB/HTTPS (RFC 9460) RDATA parser — just enough to strip
//! the `ipv6hint` SvcParam. Used by the `filter_aaaa` feature so
//! HTTPS-record-aware clients (Chrome ≥103, Firefox, Safari) don't
//! receive v6 address hints on IPv4-only networks.

/// SvcParamKey = 6 (RFC 9460 §14.3.2).
const IPV6_HINT_KEY: u16 = 6;

/// Strip the `ipv6hint` SvcParam from an HTTPS/SVCB RDATA blob.
///
/// Returns `Some(new_rdata)` when `ipv6hint` was present and removed.
/// Returns `None` when the record carries no `ipv6hint`, or when the
/// RDATA is malformed — either way the caller keeps the original bytes.
///
/// SVCB RDATA (RFC 9460 §2.2):
///   SvcPriority (u16)
///   TargetName  (uncompressed DNS name — labels terminated by 0 octet)
///   SvcParams   ({u16 key, u16 len, opaque[len] value}, sorted by key)
pub fn strip_ipv6hint(rdata: &[u8]) -> Option<Vec<u8>> {
    // SvcPriority is a fixed two-octet field.
    if rdata.len() < 2 {
        return None;
    }
    // Walk the uncompressed TargetName: length-prefixed labels, 0 ends it.
    let mut cursor = 2usize;
    loop {
        let label_len = *rdata.get(cursor)? as usize;
        cursor += 1;
        if label_len == 0 {
            break;
        }
        if label_len & 0xC0 != 0 {
            // Compression pointer: forbidden in SVCB, but defend against
            // a broken upstream anyway.
            return None;
        }
        cursor = cursor.checked_add(label_len)?;
        if cursor > rdata.len() {
            return None;
        }
    }
    let params_start = cursor;
    // First pass: validate the param framing and look for the hint.
    let mut found_hint = false;
    let mut scan = params_start;
    while scan < rdata.len() {
        if scan + 4 > rdata.len() {
            return None;
        }
        let key = u16::from_be_bytes([rdata[scan], rdata[scan + 1]]);
        let value_len = u16::from_be_bytes([rdata[scan + 2], rdata[scan + 3]]) as usize;
        let next = scan.checked_add(4)?.checked_add(value_len)?;
        if next > rdata.len() {
            return None;
        }
        if key == IPV6_HINT_KEY {
            found_hint = true;
        }
        scan = next;
    }
    if scan != rdata.len() || !found_hint {
        return None;
    }
    // Second pass: copy everything except the ipv6hint param. Filtering
    // in place preserves the ascending key order RFC 9460 requires.
    let mut filtered = Vec::with_capacity(rdata.len());
    filtered.extend_from_slice(&rdata[..params_start]);
    let mut pos = params_start;
    while pos < rdata.len() {
        let key = u16::from_be_bytes([rdata[pos], rdata[pos + 1]]);
        let value_len = u16::from_be_bytes([rdata[pos + 2], rdata[pos + 3]]) as usize;
        let next = pos + 4 + value_len;
        if key != IPV6_HINT_KEY {
            filtered.extend_from_slice(&rdata[pos..next]);
        }
        pos = next;
    }
    Some(filtered)
}
/// Build an SVCB RDATA blob from a priority, target labels, and
/// (key, value) param pairs. Shared by `svcb` unit tests and `ctx`
/// pipeline tests that need to seed the cache with a synthetic HTTPS RR.
#[cfg(test)]
pub(crate) fn build_rdata(priority: u16, target: &[&str], params: &[(u16, Vec<u8>)]) -> Vec<u8> {
    // SvcPriority first, network byte order.
    let mut rdata = priority.to_be_bytes().to_vec();
    // TargetName: length-prefixed labels, closed by the root (0) octet.
    for label in target {
        rdata.push(label.len() as u8);
        rdata.extend_from_slice(label.as_bytes());
    }
    rdata.push(0);
    // SvcParams: {key u16, len u16, value} triples in the order given.
    for (key, value) in params {
        rdata.extend_from_slice(&key.to_be_bytes());
        rdata.extend_from_slice(&(value.len() as u16).to_be_bytes());
        rdata.extend_from_slice(value);
    }
    rdata
}
// Round-trip tests: build synthetic RDATA with build_rdata, feed it to
// strip_ipv6hint, and assert byte-exact outputs (or None when untouched).
#[cfg(test)]
mod tests {
use super::*;
// SvcParam fixtures, one per key the tests exercise.
fn alpn_h3() -> (u16, Vec<u8>) {
// alpn = ["h3"]: one length-prefixed ALPN id
(1, vec![0x02, b'h', b'3'])
}
// ipv4hint (key 4) with a single address.
fn ipv4hint_single() -> (u16, Vec<u8>) {
(4, vec![93, 184, 216, 34])
}
fn ipv6hint_single() -> (u16, Vec<u8>) {
// 2606:4700::1
(
6,
vec![
0x26, 0x06, 0x47, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01,
],
)
}
#[test]
fn strips_ipv6hint_and_keeps_other_params() {
let rdata = build_rdata(1, &[], &[alpn_h3(), ipv4hint_single(), ipv6hint_single()]);
let stripped = strip_ipv6hint(&rdata).expect("ipv6hint present → stripped");
let expected = build_rdata(1, &[], &[alpn_h3(), ipv4hint_single()]);
assert_eq!(stripped, expected);
}
// No ipv6hint param → caller keeps the original bytes (None).
#[test]
fn no_ipv6hint_returns_none() {
let rdata = build_rdata(1, &[], &[alpn_h3(), ipv4hint_single()]);
assert!(strip_ipv6hint(&rdata).is_none());
}
// AliasMode (priority 0) with zero params parses, but nothing to strip.
#[test]
fn alias_mode_empty_params_returns_none() {
let rdata = build_rdata(0, &["example", "com"], &[]);
assert!(strip_ipv6hint(&rdata).is_none());
}
#[test]
fn only_ipv6hint_yields_empty_param_section() {
let rdata = build_rdata(1, &[], &[ipv6hint_single()]);
let stripped = strip_ipv6hint(&rdata).expect("ipv6hint present → stripped");
let expected = build_rdata(1, &[], &[]);
assert_eq!(stripped, expected);
}
// Rebuilt RDATA keeps priority and the uncompressed TargetName intact.
#[test]
fn preserves_target_name() {
let rdata = build_rdata(1, &["svc", "example", "net"], &[ipv6hint_single()]);
let stripped = strip_ipv6hint(&rdata).unwrap();
assert!(stripped.starts_with(&[0x00, 0x01])); // priority
assert_eq!(&stripped[2..6], b"\x03svc");
}
#[test]
fn truncated_rdata_returns_none() {
// Priority only, no target terminator.
assert!(strip_ipv6hint(&[0, 1, 3, b'c', b'o', b'm']).is_none());
}
#[test]
fn empty_input_returns_none() {
assert!(strip_ipv6hint(&[]).is_none());
}
#[test]
fn param_length_overflow_returns_none() {
// key=6, length=0xFFFF but value is short — malformed.
let rdata = vec![0, 1, 0, 0, 6, 0xFF, 0xFF, 0, 1, 2];
assert!(strip_ipv6hint(&rdata).is_none());
}
}

View File

@@ -2,9 +2,7 @@ use std::net::SocketAddr;
use log::info;
#[cfg(any(target_os = "macos", target_os = "linux"))]
use crate::forward::Upstream;
use crate::forward::UpstreamPool;
fn print_recursive_hint() {
let is_recursive = crate::config::load_config("numa.toml")
@@ -22,15 +20,15 @@ fn is_loopback_or_stub(addr: &str) -> bool {
}
/// A conditional forwarding rule: domains matching `suffix` are forwarded to `upstream`.
#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct ForwardingRule {
pub suffix: String,
dot_suffix: String, // pre-computed ".suffix" for zero-alloc matching
pub upstream: UpstreamPool,
pub upstream: Upstream,
}
impl ForwardingRule {
pub fn new(suffix: String, upstream: UpstreamPool) -> Self {
pub fn new(suffix: String, upstream: Upstream) -> Self {
let dot_suffix = format!(".{}", suffix);
Self {
suffix,
@@ -91,7 +89,7 @@ pub fn try_port53_advisory(bind_addr: &str, err: &std::io::Error) -> Option<Stri
),
_ => return None,
};
let o = "\x1b[1;38;2;192;98;58m"; // bold orange
let o = "\x1b[1;38;5;166m"; // bold orange
let r = "\x1b[0m";
Some(format!(
"
@@ -218,8 +216,7 @@ fn discover_macos() -> SystemDnsInfo {
for rule in &rules {
info!(
"auto-discovered forwarding: *.{} -> {}",
rule.suffix,
rule.upstream.label()
rule.suffix, rule.upstream
);
}
if rules.is_empty() {
@@ -238,8 +235,7 @@ fn discover_macos() -> SystemDnsInfo {
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn make_rule(domain: &str, nameserver: &str) -> Option<ForwardingRule> {
let addr = crate::forward::parse_upstream_addr(nameserver, 53).ok()?;
let pool = UpstreamPool::new(vec![Upstream::Udp(addr)], vec![]);
Some(ForwardingRule::new(domain.to_string(), pool))
Some(ForwardingRule::new(domain.to_string(), Upstream::Udp(addr)))
}
#[cfg(target_os = "linux")]
@@ -1037,7 +1033,7 @@ fn uninstall_windows() -> Result<(), String> {
pub fn match_forwarding_rule<'a>(
domain: &str,
rules: &'a [ForwardingRule],
) -> Option<&'a UpstreamPool> {
) -> Option<&'a Upstream> {
for rule in rules {
if domain == rule.suffix || domain.ends_with(&rule.dot_suffix) {
return Some(&rule.upstream);
@@ -1416,7 +1412,7 @@ pub fn service_status() -> Result<(), String> {
}
}
#[cfg(target_os = "macos")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn replace_exe_path(service: &str) -> Result<String, String> {
let exe_path =
std::env::current_exe().map_err(|e| format!("failed to get current exe: {}", e))?;
@@ -1664,78 +1660,10 @@ fn uninstall_linux() -> Result<(), String> {
Ok(())
}
/// Fallback install location when current_exe() sits on a path the
/// dynamic user cannot traverse (e.g. `/home/<user>/` mode 0700).
#[cfg(target_os = "linux")]
fn linux_service_exe_path() -> std::path::PathBuf {
std::path::PathBuf::from("/usr/local/bin/numa")
}
/// True iff every ancestor of `p` (excluding `/`) grants world-execute —
/// i.e. the `DynamicUser=yes` service account can traverse the path and
/// exec the binary without being in any group. Linuxbrew's
/// `/home/linuxbrew` is 0755 (traversable, keep brew's path, upgrades
/// via `brew` propagate). A build tree under `/home/<user>/` (0700) or
/// `~/.cargo/bin/` is not (copy to /usr/local/bin so systemd can reach it).
#[cfg(target_os = "linux")]
fn path_world_traversable_linux(p: &std::path::Path) -> bool {
use std::os::unix::fs::PermissionsExt;
let mut current = p;
while let Some(parent) = current.parent() {
if parent.as_os_str().is_empty() || parent == std::path::Path::new("/") {
break;
}
match std::fs::metadata(parent) {
Ok(m) if m.permissions().mode() & 0o001 != 0 => {}
_ => return false,
}
current = parent;
}
true
}
#[cfg(target_os = "linux")]
fn install_service_binary_linux() -> Result<std::path::PathBuf, String> {
let src = std::env::current_exe().map_err(|e| format!("current_exe(): {}", e))?;
if path_world_traversable_linux(&src) {
return Ok(src);
}
let dst = linux_service_exe_path();
if src == dst {
return Ok(dst);
}
if let Some(parent) = dst.parent() {
std::fs::create_dir_all(parent)
.map_err(|e| format!("failed to create {}: {}", parent.display(), e))?;
}
// Atomic replace via temp + rename. Plain copy fails with ETXTBSY when
// re-installing while the service is running the previous binary —
// rename swaps the path while the running process keeps the old inode.
let tmp = dst.with_extension("new");
std::fs::copy(&src, &tmp).map_err(|e| {
format!(
"failed to copy {} -> {}: {}",
src.display(),
tmp.display(),
e
)
})?;
std::fs::rename(&tmp, &dst).map_err(|e| {
let _ = std::fs::remove_file(&tmp);
format!(
"failed to rename {} -> {}: {}",
tmp.display(),
dst.display(),
e
)
})?;
Ok(dst)
}
#[cfg(target_os = "linux")]
fn install_service_linux() -> Result<(), String> {
let exe = install_service_binary_linux()?;
let unit = include_str!("../numa.service").replace("{{exe_path}}", &exe.to_string_lossy());
let unit = include_str!("../numa.service");
let unit = replace_exe_path(unit)?;
std::fs::write(SYSTEMD_UNIT, unit)
.map_err(|e| format!("failed to write {}: {}", SYSTEMD_UNIT, e))?;
@@ -1747,9 +1675,7 @@ fn install_service_linux() -> Result<(), String> {
eprintln!(" warning: failed to configure system DNS: {}", e);
}
// restart, not start: on re-install the service is already running
// the previous binary; restart picks up the new one.
run_systemctl(&["restart", "numa"])?;
run_systemctl(&["start", "numa"])?;
eprintln!(" Service installed and started.");
eprintln!(" Numa will auto-start on boot and restart if killed.");
@@ -2065,25 +1991,22 @@ Wireless LAN adapter Wi-Fi:
}
#[test]
fn install_templates_contain_exe_path_placeholder() {
// Both files are substituted at install time — plist via
// replace_exe_path on macOS, numa.service via inline .replace
// in install_service_linux. Catch placeholder removal early.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn replace_exe_path_substitutes_template() {
let plist = include_str!("../com.numa.dns.plist");
let unit = include_str!("../numa.service");
assert!(plist.contains("{{exe_path}}"), "plist missing placeholder");
assert!(
unit.contains("{{exe_path}}"),
"unit file missing placeholder"
);
}
#[test]
#[cfg(target_os = "macos")]
fn replace_exe_path_substitutes_template() {
let plist = include_str!("../com.numa.dns.plist");
let result = replace_exe_path(plist).expect("replace_exe_path failed for plist");
assert!(!result.contains("{{exe_path}}"));
let result = replace_exe_path(unit).expect("replace_exe_path failed for unit");
assert!(!result.contains("{{exe_path}}"));
}
#[test]

View File

@@ -63,7 +63,6 @@ pub async fn test_ctx() -> ServerCtx {
ca_pem: None,
mobile_enabled: false,
mobile_port: 8765,
filter_aaaa: false,
}
}

View File

@@ -49,7 +49,7 @@ pub fn try_data_dir_advisory(err: &crate::Error, data_dir: &Path) -> Option<Stri
if io_err.kind() != std::io::ErrorKind::PermissionDenied {
return None;
}
let o = "\x1b[1;38;2;192;98;58m";
let o = "\x1b[1;38;5;166m";
let r = "\x1b[0m";
Some(format!(
"

View File

@@ -1,288 +0,0 @@
#!/usr/bin/env bash
#
# Systemd service install verification for the DynamicUser-based Linux
# service unit. Stands up a privileged ubuntu:24.04 container with systemd
# as PID 1, builds numa inside, runs three scenarios that CI does not:
#
# A. Fresh install — every advertised port is not just bound but
# functional (DNS resolves on :53, TLS handshake validates against
# numa's CA on :853/:443, HTTP responds on :80, API on :5380).
# B. Upgrade from pre-drop layout (root-owned /var/lib/numa) preserves
# the CA fingerprint — users' browser-installed CA trust survives.
# C. Install from a 0700 source directory stages the binary under
# /usr/local/bin/numa and the service starts from there.
#
# First run is slow (~5-10 min): image pull + apt + cold cargo build.
# Subsequent runs reuse cached docker volumes for cargo + target (~30s).
#
# Requirements: docker
# Usage: ./tests/docker/install-systemd.sh
set -u
set -o pipefail

# ANSI escapes for assertion output.
GREEN="\033[32m"
RED="\033[31m"
RESET="\033[0m"

# pass/fail: print a colored verdict line. fail also flips the global
# FAIL flag the end-of-run summary checks. Colors are passed as %b
# arguments rather than interpolated into the format string.
pass() { printf ' %bPASS%b: %s\n' "$GREEN" "$RESET" "$*"; }
fail() { printf ' %bFAIL%b: %s\n' "$RED" "$RESET" "$*"; FAIL=1; }
# ============================================================
# Mode B: running inside the systemd container — run scenarios
# ============================================================
if [ "${NUMA_INSIDE:-}" = "1" ]; then
set +e # assertions report pass/fail, don't abort
FAIL=0
NUMA=/work/target/release/numa
# Wipe every trace of a previous install: the unit, both state trees
# (the DynamicUser /var/lib/private one and the legacy flat one),
# config, the 0700 fixture home, and the staged binary.
reset_state() {
"$NUMA" uninstall >/dev/null 2>&1 || true
systemctl reset-failed numa 2>/dev/null || true
rm -rf /var/lib/numa /var/lib/private/numa /etc/numa /home/builder /usr/local/bin/numa
systemctl daemon-reload 2>/dev/null || true
}
# Print the username owning the service MainPID, or "" when the unit
# currently has no main process (MainPID=0).
main_pid_user() {
local pid
pid=$(systemctl show -p MainPID --value numa)
[ "$pid" != "0" ] || { echo ""; return; }
ps -o user= -p "$pid" 2>/dev/null | tr -d ' '
}
# MainPID + user briefly stabilize after a fresh restart. Retry so we
# don't race the moment systemd flips the service to "active" vs when
# the forked numa process actually owns MainPID.
# Polls up to 20 × 0.2s (~4s); passes iff the settled MainPID is a
# process named "numa" running as a non-root (transient) account.
assert_nonroot() {
local pid user comm n=0
while [ $n -lt 20 ]; do
pid=$(systemctl show -p MainPID --value numa)
if [ "$pid" != "0" ]; then
comm=$(ps -o comm= -p "$pid" 2>/dev/null | tr -d ' ')
user=$(ps -o user= -p "$pid" 2>/dev/null | tr -d ' ')
# Only judge the user once MainPID actually belongs to numa.
if [ "$comm" = "numa" ]; then
if [ "$user" = "root" ]; then
fail "daemon runs as root (expected transient UID)"
else
pass "daemon runs as $user (non-root)"
fi
return
fi
fi
sleep 0.2
n=$((n + 1))
done
fail "numa MainPID did not settle (last: pid=${pid:-?} comm=${comm:-?} user=${user:-?})"
}
# Functional DNS check: just "port 53 bound" isn't enough — systemd-resolved
# listens on 127.0.0.53 and would satisfy a bind test. Retries for ~15s
# to tolerate cold-start upstream / blocklist warmup.
assert_dns_works() {
local n=0
while [ $n -lt 15 ]; do
# The grep pins a dotted-quad A record, not just any dig output.
if dig @127.0.0.1 -p 53 example.com +short +timeout=2 +tries=1 2>/dev/null \
| grep -qE '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'; then
pass "DNS resolves on :53 (A record returned)"
return
fi
sleep 1
n=$((n + 1))
done
fail "DNS did not return an A record on :53 within 15s"
}
# TLS handshake: cert must validate against numa's CA when connecting
# to a .numa SNI. Catches port-not-bound, wrong cert, missing CA file.
# $1 = port, $2 = SNI to send (defaults to numa.numa).
assert_tls_handshake() {
  local port=$1 sni=${2:-numa.numa} transcript
  # Guard clause: a failed connection/verification is its own finding.
  if ! transcript=$(openssl s_client -connect "127.0.0.1:${port}" \
      -servername "$sni" \
      -CAfile /var/lib/numa/ca.pem \
      -verify_return_error </dev/null 2>&1); then
    fail "openssl s_client failed connecting to :${port}"
    return
  fi
  if grep -q 'Verify return code: 0 (ok)' <<<"$transcript"; then
    pass "TLS handshake + cert chain verified on :${port}"
  else
    fail "TLS handshake on :${port} did not report 'Verify return code: 0'"
  fi
}
# HTTP liveness on :80 — any status code counts as alive; only a failed
# connection (curl's 000 sentinel) is a finding.
assert_http_responds() {
  local status
  status=$(curl -s -o /dev/null -w "%{http_code}" --max-time 3 http://127.0.0.1/ || echo 000)
  if [ "$status" = "000" ]; then
    fail "HTTP :80 connection failed"
  else
    pass "HTTP responds on :80 (status $status)"
  fi
}
# API health probe: /health on :5380 must answer 2xx within 3s.
assert_api_healthy() {
  if ! curl -sf --max-time 3 http://127.0.0.1:5380/health >/dev/null; then
    fail "API /health failed on :5380"
    return
  fi
  pass "API /health OK on :5380"
}
# SHA-256 fingerprint of the installed CA cert, or "" when unreadable.
ca_fingerprint() {
openssl x509 -in /var/lib/numa/ca.pem -noout -fingerprint -sha256 2>/dev/null \
| sed 's/.*=//'
}
# Wait up to 10s (20 × 0.5s) for the unit to report active; on timeout
# record a failure and dump a short status excerpt for the log.
wait_active() {
local n=0
while [ $n -lt 20 ]; do
systemctl is-active --quiet numa && return 0
sleep 0.5
n=$((n + 1))
done
fail "service did not become active within 10s"
systemctl status numa --no-pager -l 2>&1 | head -20 || true
return 1
}
# ---- Scenario A ----
printf "\n=== Scenario A: fresh install — every advertised port is functional ===\n"
reset_state
"$NUMA" install >/tmp/installA.log 2>&1 || { fail "install failed"; tail -20 /tmp/installA.log; }
wait_active || true
assert_nonroot
assert_dns_works
assert_tls_handshake 853
assert_tls_handshake 443
assert_http_responds
assert_api_healthy
# ---- Scenario B ----
# Pre-drop installs left /var/lib/numa as a plain root-owned tree.
# Flattening the current DynamicUser layout back into that shape
# simulates the upgrade path without needing an actual old binary.
printf "\n=== Scenario B: CA fingerprint survives upgrade from pre-drop layout ===\n"
fp_before=$(ca_fingerprint)
if [ -z "$fp_before" ]; then
fail "could not read initial CA fingerprint (skipping scenario B)"
else
echo " CA fingerprint before: $fp_before"
"$NUMA" uninstall >/dev/null 2>&1 || true
# Copy the DynamicUser state out, then re-seed it as a flat
# root-owned tree — the legacy on-disk layout.
tmp=$(mktemp -d)
cp -a /var/lib/private/numa/. "$tmp"/ 2>/dev/null || true
rm -rf /var/lib/numa /var/lib/private/numa
mv "$tmp" /var/lib/numa
chown -R root:root /var/lib/numa
chmod 755 /var/lib/numa
[ -f /var/lib/numa/ca.pem ] || fail "ca.pem missing from seeded legacy tree"
"$NUMA" install >/tmp/installB.log 2>&1 || { fail "upgrade install failed"; tail -20 /tmp/installB.log; }
wait_active || true
assert_nonroot
fp_after=$(ca_fingerprint)
if [ -z "$fp_after" ]; then
fail "could not read CA fingerprint after upgrade"
elif [ "$fp_before" = "$fp_after" ]; then
pass "CA fingerprint preserved across upgrade"
else
fail "CA fingerprint changed: before=$fp_before after=$fp_after"
fi
assert_dns_works
fi
# ---- Scenario C ----
printf "\n=== Scenario C: install from unreachable source stages binary to /usr/local/bin ===\n"
reset_state
# 0700 home: the DynamicUser account cannot traverse it, so install
# must stage the binary somewhere world-reachable.
mkdir -p /home/builder
chmod 700 /home/builder
cp "$NUMA" /home/builder/numa
chmod 755 /home/builder/numa
/home/builder/numa install >/tmp/installC.log 2>&1 || { fail "install failed"; tail -20 /tmp/installC.log; }
wait_active || true
if [ -x /usr/local/bin/numa ]; then
pass "binary staged to /usr/local/bin/numa"
else
fail "/usr/local/bin/numa missing after install from 0700 source"
fi
exec_line=$(grep '^ExecStart=' /etc/systemd/system/numa.service 2>/dev/null || echo "ExecStart=<unit missing>")
if echo "$exec_line" | grep -q '/usr/local/bin/numa'; then
pass "unit ExecStart points to staged path"
else
fail "unit ExecStart wrong: $exec_line"
fi
assert_nonroot
assert_dns_works
reset_state
rm -rf /home/builder
# Final verdict: FAIL was flipped by any fail() call above.
echo
if [ "$FAIL" -eq 0 ]; then
printf "${GREEN}── all scenarios passed ──${RESET}\n"
exit 0
else
printf "${RED}── some scenarios failed ──${RESET}\n"
exit 1
fi
fi
# ============================================================
# Mode A: host-side bootstrap
# ============================================================
# From here on any failure should abort the bootstrap.
set -e
cd "$(dirname "$0")/../.."
IMAGE=numa-install-systemd:local
CONTAINER="numa-install-systemd-$$"
# Always tear the container down, even on early exit.
trap 'docker rm -f "$CONTAINER" >/dev/null 2>&1 || true' EXIT
echo "── building systemd-in-container image (cached after first run) ──"
docker build --quiet -t "$IMAGE" -f - . <<'DOCKERFILE' >/dev/null
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update -qq && apt-get install -y -qq \
systemd systemd-sysv systemd-resolved \
ca-certificates curl build-essential \
pkg-config libssl-dev cmake make perl \
dnsutils iproute2 openssl \
&& rm -rf /var/lib/apt/lists/* \
&& for u in dev-hugepages.mount sys-fs-fuse-connections.mount \
systemd-logind.service getty.target console-getty.service; do \
systemctl mask $u; \
done
STOPSIGNAL SIGRTMIN+3
CMD ["/lib/systemd/systemd"]
DOCKERFILE
echo "── starting systemd container ──"
docker run -d --name "$CONTAINER" \
--privileged --cgroupns=host \
--tmpfs /run --tmpfs /run/lock --tmpfs /tmp:exec \
-v "$PWD:/src:ro" \
-v numa-install-systemd-cargo:/root/.cargo \
-v numa-install-systemd-work:/work \
"$IMAGE" >/dev/null
# Wait for systemd to be up
for _ in $(seq 1 30); do
state=$(docker exec "$CONTAINER" systemctl is-system-running 2>&1 || true)
case "$state" in running|degraded) break ;; esac
sleep 0.5
done
echo "── copying source into /work (writable) ──"
docker exec "$CONTAINER" bash -c '
mkdir -p /work
tar -C /src --exclude=./target --exclude=./.git --exclude=./.claude -cf - . | tar -C /work -xf -
'
echo "── rustup + cargo build --release --locked ──"
docker exec "$CONTAINER" bash -c '
set -e
if ! command -v cargo &>/dev/null; then
curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --quiet
fi
. "$HOME/.cargo/env"
cd /work
cargo build --release --locked 2>&1 | tail -5
'
# Re-enter this same script inside the container (NUMA_INSIDE=1 → Mode B).
echo "── running scenarios ──"
docker exec -e NUMA_INSIDE=1 "$CONTAINER" bash /src/tests/docker/install-systemd.sh

View File

@@ -1,10 +1,7 @@
#!/usr/bin/env bash
# Integration test suite for Numa
# Runs a test instance on port 5354, validates all features, exits with status.
# Usage:
# ./tests/integration.sh [release|debug] # all suites
# SUITES=7 ./tests/integration.sh # only Suite 7
# SUITES=1,3,7 ./tests/integration.sh # Suites 1, 3, and 7
# Usage: ./tests/integration.sh [release|debug]
set -euo pipefail
@@ -17,14 +14,6 @@ LOG="/tmp/numa-integration-test.log"
PASSED=0
FAILED=0
# Suite filter: empty runs all; comma list runs a subset.
SUITES="${SUITES:-}"
should_run_suite() {
[ -z "$SUITES" ] && return 0
case ",$SUITES," in *",$1,"*) return 0;; esac
return 1
}
# Colors
GREEN="\033[32m"
RED="\033[31m"
@@ -177,7 +166,6 @@ CONF
}
# ---- Suite 1: Recursive mode + DNSSEC ----
if should_run_suite 1; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 1: Recursive + DNSSEC + Blocking ║"
@@ -246,10 +234,7 @@ kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
fi # end Suite 1
# ---- Suite 2: Forward mode (backward compat) ----
if should_run_suite 2; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 2: Forward (DoH) + Blocking ║"
@@ -276,10 +261,7 @@ enabled = true
enabled = false
"
fi # end Suite 2
# ---- Suite 3: Forward UDP (plain, no DoH) ----
if should_run_suite 3; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 3: Forward (UDP) + No Blocking ║"
@@ -325,10 +307,7 @@ kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
fi # end Suite 3
# ---- Suite 4: Local zones + Overrides API ----
if should_run_suite 4; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 4: Local Zones + Overrides API ║"
@@ -437,10 +416,7 @@ kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
fi # end Suite 4
# ---- Suite 5: DNS-over-TLS (RFC 7858) ----
if should_run_suite 5; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 5: DNS-over-TLS (RFC 7858) ║"
@@ -562,10 +538,7 @@ CONF
fi
sleep 1
fi # end Suite 5
# ---- Suite 6: Proxy + DoT coexistence ----
if should_run_suite 6; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 6: Proxy + DoT Coexistence ║"
@@ -725,332 +698,6 @@ CONF
rm -rf "$NUMA_DATA"
fi
fi # end Suite 6
# ---- Suite 7: filter_aaaa (IPv4-only networks) ----
# Asserts three behaviours of the filter: A untouched, AAAA answered as
# NOERROR/NODATA, local-zone AAAA exempt; plus ipv6hint stripping in
# HTTPS (type 65) records. A second run with the flag unset guards
# against a broken network masquerading as a working filter.
if should_run_suite 7; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 7: filter_aaaa ║"
echo "╚══════════════════════════════════════════╝"
# Config A — filter on, with a local AAAA zone to prove local data bypass.
cat > "$CONFIG" << 'CONF'
[server]
bind_addr = "127.0.0.1:5354"
api_port = 5381
filter_aaaa = true
[upstream]
mode = "forward"
address = "9.9.9.9"
port = 53
[cache]
max_entries = 10000
[blocking]
enabled = false
[proxy]
enabled = false
[[zones]]
domain = "v6.test"
record_type = "AAAA"
value = "2001:db8::1"
ttl = 60
CONF
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
NUMA_PID=$!
# Fixed startup delay (this suite doesn't poll /health like Suite 8 does).
sleep 3
# Short timeout/single try keeps a dead upstream from stalling the suite.
DIG="dig @127.0.0.1 -p $PORT +time=5 +tries=1"
echo ""
echo "=== filter_aaaa = true ==="
# A queries must be untouched.
# NOTE(review): expected "." presumably pattern-matches any dotted IPv4
# answer — confirm against check()'s definition (not in view here).
check "A record resolves under filter_aaaa" \
"." \
"$($DIG google.com A +short | head -1)"
# AAAA must be NOERROR (NODATA), not NXDOMAIN, not SERVFAIL.
check "AAAA returns NOERROR (not NXDOMAIN)" \
"status: NOERROR" \
"$($DIG google.com AAAA 2>&1 | grep 'status:')"
check "AAAA returns zero answers (NODATA shape)" \
"ANSWER: 0" \
"$($DIG google.com AAAA 2>&1 | grep -oE 'ANSWER: [0-9]+' | head -1)"
# Local zone AAAA must survive the filter (PR claim: local data bypasses).
check "Local [[zones]] AAAA bypasses filter" \
"2001:db8::1" \
"$($DIG v6.test AAAA +short)"
# HTTPS RR: ipv6hint (SvcParamKey 6) must be stripped. Query as `type65`
# because dig 9.10.6 (macOS) misparses `HTTPS` as a domain name; `type65`
# works on both 9.10.6 and 9.18. Assert on the raw rdata hex (RFC 3597
# generic format), since dig 9.10.6 doesn't pretty-print HTTPS params.
# cloudflare.com's ipv6hint values sit under the 2606:4700 prefix —
# checking that `26064700` is absent from the rdata hex is a precise,
# upstream-stable signal that the TLV was stripped.
HTTPS_OUT=$($DIG cloudflare.com type65 2>&1)
if echo "$HTTPS_OUT" | grep -qE "cloudflare\.com\..*IN[[:space:]]+TYPE65"; then
HTTPS_HEX=$(echo "$HTTPS_OUT" | grep -A5 "IN[[:space:]]*TYPE65" | tr -d " \t\n")
if echo "$HTTPS_HEX" | grep -qi "26064700"; then
check "HTTPS ipv6hint stripped (2606:4700 absent from rdata)" "absent" "present"
else
check "HTTPS ipv6hint stripped (2606:4700 absent from rdata)" "absent" "absent"
fi
else
# Upstream didn't return an HTTPS record — skip rather than false-pass.
printf " ${DIM}~ HTTPS ipv6hint stripped (skipped: no HTTPS RR returned by upstream)${RESET}\n"
fi
kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
# Config B — filter off. Regression guard: prove AAAA answers come back
# when the flag isn't set, so a network failure in Config A can't silently
# pass as "filter working".
cat > "$CONFIG" << 'CONF'
[server]
bind_addr = "127.0.0.1:5354"
api_port = 5381
[upstream]
mode = "forward"
address = "9.9.9.9"
port = 53
[cache]
max_entries = 10000
[blocking]
enabled = false
[proxy]
enabled = false
CONF
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
NUMA_PID=$!
sleep 3
echo ""
echo "=== filter_aaaa unset (regression guard) ==="
# ":" — any real IPv6 answer contains a colon.
check "AAAA returns real answers with filter off" \
":" \
"$($DIG google.com AAAA +short | head -1)"
kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
fi # end Suite 7
# ---- Suite 8: ODoH (Oblivious DoH via public relay + target) ----
# Exercises the full client pipeline: /.well-known/odohconfigs fetch,
# HPKE seal/unseal, URL-query target routing (RFC 9230 §5), dashboard
# QueryPath::Odoh counter. Depends on the public ecosystem being up —
# the probe-odoh-ecosystem.sh script guards against flaky runs.
if should_run_suite 8; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 8: ODoH (Anonymous DNS) ║"
echo "╚══════════════════════════════════════════╝"
run_test_suite "ODoH via edgecompute.app relay → Cloudflare target" "
[server]
bind_addr = \"127.0.0.1:5354\"
api_port = 5381
[upstream]
mode = \"odoh\"
relay = \"https://odoh-relay.edgecompute.app/proxy\"
target = \"https://odoh.cloudflare-dns.com/dns-query\"
[cache]
max_entries = 10000
min_ttl = 60
max_ttl = 86400
[blocking]
enabled = false
[proxy]
enabled = false
"
# Re-start briefly to assert ODoH-specific observability: the odoh counter
# has to tick above zero after a query, and the stats label has to reflect
# the oblivious path. These guard against silent regressions in the
# QueryPath::Odoh tagging and the /stats serialisation.
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
NUMA_PID=$!
# Poll /health for up to ~3 s rather than a fixed sleep.
for _ in $(seq 1 30); do
curl -sf "http://127.0.0.1:$API_PORT/health" >/dev/null 2>&1 && break
sleep 0.1
done
# One real query to tick the odoh counter; tolerate failure — the stats
# assertions below are what actually decide pass/fail.
$DIG example.com A +short > /dev/null 2>&1 || true
sleep 1
STATS=$(curl -sf http://127.0.0.1:$API_PORT/stats 2>/dev/null)
# upstream_transport.odoh lives inside the upstream_transport object.
ODOH_COUNT=$(echo "$STATS" | grep -o '"upstream_transport":{[^}]*}' \
| grep -o '"odoh":[0-9]*' | cut -d: -f2)
check "upstream_transport.odoh > 0 after a query" "[1-9]" "${ODOH_COUNT:-0}"
check "Upstream label advertises odoh://" \
"odoh://" \
"$(echo "$STATS" | grep -o '"upstream":"[^"]*"')"
check "Stats mode field is 'odoh'" \
'"mode":"odoh"' \
"$(echo "$STATS" | grep -o '"mode":"odoh"')"
# Strict-mode failure path: a clearly-unreachable relay must produce
# SERVFAIL without silent downgrade. We hijack the config to point at
# an .invalid host so we don't rely on external uptime.
kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
cat > "$CONFIG" << 'CONF'
[server]
bind_addr = "127.0.0.1:5354"
api_port = 5381
[upstream]
mode = "odoh"
relay = "https://relay.invalid/proxy"
target = "https://odoh.cloudflare-dns.com/dns-query"
strict = true
[cache]
max_entries = 10000
[blocking]
enabled = false
[proxy]
enabled = false
CONF
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
NUMA_PID=$!
for _ in $(seq 1 30); do
curl -sf "http://127.0.0.1:$API_PORT/health" >/dev/null 2>&1 && break
sleep 0.1
done
check "Strict-mode relay outage returns SERVFAIL" \
"SERVFAIL" \
"$($DIG example.com A 2>&1 | grep 'status:')"
kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
# Negative: relay and target on the same host must be rejected at startup.
# No [cache]/[blocking] sections needed — the binary must refuse before
# serving, and its stderr is captured for the "same host" message.
cat > "$CONFIG" << 'CONF'
[server]
bind_addr = "127.0.0.1:5354"
api_port = 5381
[upstream]
mode = "odoh"
relay = "https://odoh.cloudflare-dns.com/proxy"
target = "https://odoh.cloudflare-dns.com/dns-query"
CONF
STARTUP_OUT=$("$BINARY" "$CONFIG" 2>&1 || true)
check "Same-host relay+target rejected at startup" \
"same host" \
"$STARTUP_OUT"
fi # end Suite 8
# ---- Suite 9: Numa's own ODoH relay (--relay-mode) ----
# Exercises `numa relay PORT` as a forwarding proxy to a real ODoH target.
# Validates the RFC 9230 §5 relay behaviour: URL-query routing, content-type
# gating, body-size cap, and /health observability.
if should_run_suite 9; then
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 9: Numa ODoH Relay (own) ║"
echo "╚══════════════════════════════════════════╝"
RELAY_PORT=18443
"$BINARY" relay $RELAY_PORT > "$LOG" 2>&1 &
NUMA_PID=$!
# Poll /health for up to ~3 s rather than a fixed sleep.
for _ in $(seq 1 30); do
curl -sf "http://127.0.0.1:$RELAY_PORT/health" >/dev/null 2>&1 && break
sleep 0.1
done
echo ""
echo "=== Relay Endpoints ==="
check "Health endpoint returns ok" \
"ok" \
"$(curl -sf http://127.0.0.1:$RELAY_PORT/health | head -1)"
# Happy path: forwards arbitrary body to Cloudflare's ODoH target. The
# target will reject the garbage envelope with HTTP 400 — which is exactly
# what proves our relay faithfully forwarded (otherwise we'd see our own
# 4xx from the relay itself).
HAPPY_STATUS=$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Content-Type: application/oblivious-dns-message" \
--data-binary "garbage-forwarded-end-to-end" \
"http://127.0.0.1:$RELAY_PORT/relay?targethost=odoh.cloudflare-dns.com&targetpath=/dns-query")
check "Relay forwards to target (target rejects garbage → 400)" \
"400" \
"$HAPPY_STATUS"
echo ""
echo "=== Guards ==="
# Each guard must be rejected by the relay itself, before any forwarding.
check "Missing content-type → 415" \
"415" \
"$(curl -sS -o /dev/null -w '%{http_code}' -X POST --data-binary 'x' \
'http://127.0.0.1:'$RELAY_PORT'/relay?targethost=odoh.cloudflare-dns.com&targetpath=/dns-query')"
check "Oversized body (>4 KiB) → 413" \
"413" \
"$(head -c 5000 /dev/urandom | curl -sS -o /dev/null -w '%{http_code}' -X POST \
-H 'Content-Type: application/oblivious-dns-message' --data-binary @- \
'http://127.0.0.1:'$RELAY_PORT'/relay?targethost=odoh.cloudflare-dns.com&targetpath=/dns-query')"
check "Invalid targethost (no dot) → 400" \
"400" \
"$(curl -sS -o /dev/null -w '%{http_code}' -X POST \
-H 'Content-Type: application/oblivious-dns-message' --data-binary 'x' \
'http://127.0.0.1:'$RELAY_PORT'/relay?targethost=invalid&targetpath=/dns-query')"
echo ""
echo "=== Counters ==="
# The earlier happy-path and guard requests should have ticked both counters.
HEALTH=$(curl -sf "http://127.0.0.1:$RELAY_PORT/health")
check "Relay counted at least one forwarded_ok" \
"[1-9]" \
"$(echo "$HEALTH" | grep 'forwarded_ok' | awk '{print $2}')"
check "Relay counted at least one rejected_bad_request" \
"[1-9]" \
"$(echo "$HEALTH" | grep 'rejected_bad_request' | awk '{print $2}')"
kill "$NUMA_PID" 2>/dev/null || true
wait "$NUMA_PID" 2>/dev/null || true
sleep 1
fi # end Suite 9
# Summary
echo ""
TOTAL=$((PASSED + FAILED))

View File

@@ -1,101 +0,0 @@
#!/usr/bin/env bash
# Probe the public ODoH ecosystem.
#
# Source of truth: DNSCrypt's curated list at
# https://github.com/DNSCrypt/dnscrypt-resolvers/tree/master/v3
# - v3/odoh-servers.md (ODoH targets)
# - v3/odoh-relays.md (ODoH relays)
#
# As of commit 2025-09-16 ("odohrelay-crypto-sx seems to be the only ODoH
# relay left"), the full public ecosystem is 4 targets + 1 relay. Re-run this
# script against the upstream list before making any "only N public relays"
# claim publicly.
#
# Usage: ./tests/probe-odoh-ecosystem.sh
# Deliberately no -e: one unreachable endpoint must not abort the sweep;
# -u still catches typos, pipefail keeps pipeline failures visible.
set -uo pipefail
# ANSI color codes for the per-endpoint result lines.
GREEN="\033[32m"
RED="\033[31m"
YELLOW="\033[33m"
DIM="\033[90m"
RESET="\033[0m"
# Tallies incremented by the probe_* helpers; they drive the exit status.
UP=0
DOWN=0
#######################################
# Probe one ODoH target: fetch /.well-known/odohconfigs and report
# reachability, latency, payload size and content-type.
# Globals:   UP / DOWN (incremented), GREEN/RED/DIM/RESET (read)
# Arguments: $1 - display name, $2 - target hostname
# Outputs:   one formatted status line to stdout
#######################################
probe_target() {
  local name="$1"
  local host="$2"
  local url="https://${host}/.well-known/odohconfigs"
  # mktemp instead of a fixed /tmp path: safe under concurrent runs and
  # not attacker-predictable.
  local body
  body=$(mktemp) || { printf 'mktemp failed\n' >&2; return 1; }
  local start
  start=$(date +%s%N)
  local headers
  headers=$(curl -sS -o "$body" -D - --max-time 5 -A "numa-odoh-probe/0.1" "$url" 2>&1) || {
    DOWN=$((DOWN + 1))
    printf " ${RED}${RESET} %-25s ${DIM}unreachable${RESET}\n" "$name"
    # Original leaked the temp file on this branch; always clean up.
    rm -f -- "$body"
    return
  }
  local elapsed_ms
  elapsed_ms=$(( ($(date +%s%N) - start) / 1000000 ))
  local status
  status=$(echo "$headers" | head -1 | awk '{print $2}')
  local ctype
  ctype=$(echo "$headers" | grep -i '^content-type:' | head -1 | tr -d '\r')
  local size
  # stat -f%z is BSD/macOS, stat -c%s is GNU; fall back to 0 if both fail.
  size=$(stat -f%z "$body" 2>/dev/null || stat -c%s "$body" 2>/dev/null || echo 0)
  # Live means HTTP 200 with a non-empty odohconfigs payload.
  if [[ "$status" == "200" ]] && [[ "$size" -gt 0 ]]; then
    UP=$((UP + 1))
    printf " ${GREEN}${RESET} %-25s ${DIM}%4dms %s bytes %s${RESET}\n" "$name" "$elapsed_ms" "$size" "$ctype"
  else
    DOWN=$((DOWN + 1))
    printf " ${RED}${RESET} %-25s ${DIM}status=%s size=%s${RESET}\n" "$name" "$status" "$size"
  fi
  rm -f -- "$body"
}
#######################################
# Probe one ODoH relay endpoint for liveness.
# Relays don't expose /.well-known/odohconfigs, so we only verify TLS
# reachability and that the path answers a malformed POST with an HTTP
# error (proving the relay route exists). Full validation needs HPKE.
# Globals:   UP / DOWN (incremented), GREEN/RED/DIM/RESET (read)
# Arguments: $1 - display name, $2 - relay URL
# Outputs:   one formatted status line to stdout
#######################################
probe_relay() {
  local label="$1"
  local endpoint="$2"
  local t0
  t0=$(date +%s%N)
  local code
  if ! code=$(curl -sS -o /dev/null -w "%{http_code}" --max-time 5 -A "numa-odoh-probe/0.1" \
      -X POST -H "Content-Type: application/oblivious-dns-message" \
      --data-binary "" "$endpoint" 2>&1); then
    DOWN=$((DOWN + 1))
    printf " ${RED}${RESET} %-25s ${DIM}unreachable${RESET}\n" "$label"
    return
  fi
  local elapsed_ms
  elapsed_ms=$(( ($(date +%s%N) - t0) / 1000000 ))
  # Any 2xx or 4xx means the endpoint is live (TLS works, HTTP responded).
  # 5xx or 000 (curl failure) means broken.
  case "$code" in
    2*|4*)
      UP=$((UP + 1))
      printf " ${GREEN}${RESET} %-25s ${DIM}%4dms status=%s (endpoint live)${RESET}\n" "$label" "$elapsed_ms" "$code"
      ;;
    *)
      DOWN=$((DOWN + 1))
      printf " ${RED}${RESET} %-25s ${DIM}status=%s${RESET}\n" "$label" "$code"
      ;;
  esac
}
# Sweep the curated endpoint list (see header); probe_* helpers update UP/DOWN.
echo "ODoH targets:"
probe_target "Cloudflare" "odoh.cloudflare-dns.com"
probe_target "crypto.sx" "odoh.crypto.sx"
probe_target "Snowstorm" "dope.snowstorm.love"
probe_target "Tiarap" "doh.tiarap.org"
echo
echo "ODoH relays:"
probe_relay "Frank Denis (Fastly)" "https://odoh-relay.edgecompute.app/proxy"
echo
# Exit status is the CI signal: 0 only when every endpoint answered.
TOTAL=$((UP + DOWN))
if [[ "$DOWN" -eq 0 ]]; then
printf "${GREEN}All %d endpoints up${RESET}\n" "$TOTAL"
exit 0
else
printf "${YELLOW}%d/%d up, %d down${RESET}\n" "$UP" "$TOTAL" "$DOWN"
exit 1
fi