Compare commits
123 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1f6bdff8f8 | ||
|
|
643d6b01e1 | ||
|
|
17c8e70aa3 | ||
|
|
389ac09907 | ||
|
|
5308e9648c | ||
|
|
819614fa7d | ||
|
|
fab8b698d8 | ||
|
|
a6f23a5ddb | ||
|
|
27dfaab360 | ||
|
|
b2ed2e6aec | ||
|
|
79ecb73d87 | ||
|
|
bf5565ac26 | ||
|
|
679b346246 | ||
|
|
039254280b | ||
|
|
1b2f682026 | ||
|
|
82cc588c67 | ||
|
|
bc54ea930f | ||
|
|
7001ba2e51 | ||
|
|
6887c8e02e | ||
|
|
7f52bd8a32 | ||
|
|
c98e6c3ea9 | ||
|
|
186e709373 | ||
|
|
bacc49667a | ||
|
|
7d0fe19462 | ||
|
|
1632fc36f2 | ||
|
|
0a73cdf4db | ||
|
|
2b0c4e3d5e | ||
|
|
357c710ec4 | ||
|
|
7742858b7b | ||
|
|
1239ed0e72 | ||
|
|
cb54ab3dfc | ||
|
|
aa8923b2c6 | ||
|
|
14efc51340 | ||
|
|
e4350ae81c | ||
|
|
766935ec97 | ||
|
|
efe3669540 | ||
|
|
ad34fe2d9e | ||
|
|
80fcfd10ae | ||
|
|
e4a8893214 | ||
|
|
d979cd9505 | ||
|
|
8c421b9fa3 | ||
|
|
ad7884f2f6 | ||
|
|
6a70ab0f1b | ||
|
|
0b883d1c0d | ||
|
|
7f46f6271e | ||
|
|
f3ca83246c | ||
|
|
da93a3cde3 | ||
|
|
98da440c84 | ||
|
|
4e5b88496c | ||
|
|
d5f7ce9e2d | ||
|
|
cc704be590 | ||
|
|
ff1200eb10 | ||
|
|
49535568d9 | ||
|
|
cd1beedf38 | ||
|
|
be52e5c305 | ||
|
|
669498e85f | ||
|
|
d325b92e44 | ||
|
|
261fd2e148 | ||
|
|
30e46e549c | ||
|
|
ac49658c2b | ||
|
|
5265f571d0 | ||
|
|
0ebd924825 | ||
|
|
06d4e91cd2 | ||
|
|
71dbb138bc | ||
|
|
fbf3ca6d11 | ||
|
|
a84f2e7f1d | ||
|
|
7aee90c99b | ||
|
|
1304b1c02c | ||
|
|
59397ecce4 | ||
|
|
f849a4d65f | ||
|
|
962b400f4c | ||
|
|
1f4063d5db | ||
|
|
c6bc307f0a | ||
|
|
c5208e934d | ||
|
|
d69b79451e | ||
|
|
0b194256a9 | ||
|
|
e0c1997056 | ||
|
|
9e07064c94 | ||
|
|
43cedf11f7 | ||
|
|
cd6a54c652 | ||
|
|
9f89627c5a | ||
|
|
e7e5c173f2 | ||
|
|
c6b35045d8 | ||
|
|
10f1602803 | ||
|
|
41a97bb930 | ||
|
|
c4e733c8ef | ||
|
|
4020776b8e | ||
|
|
763ba1de91 | ||
|
|
51dc06690e | ||
|
|
fb89b78226 | ||
|
|
64c4d146ec | ||
|
|
9c290b6ef4 | ||
|
|
c836903db5 | ||
|
|
5e5a6544bc | ||
|
|
227af04564 | ||
|
|
4c58ff49b0 | ||
|
|
d261e8bc86 | ||
|
|
2de337ac36 | ||
|
|
5810ee5aac | ||
|
|
06850de728 | ||
|
|
995916d01b | ||
|
|
7aca3b1991 | ||
|
|
b7d64a9707 | ||
|
|
c333705a0e | ||
|
|
50d17ae118 | ||
|
|
5495107c9e | ||
|
|
02e83ccd72 | ||
|
|
ccbf893b92 | ||
|
|
cd90b50d68 | ||
|
|
5866ff1ba1 | ||
|
|
9a3de2f231 | ||
|
|
6fdadd637c | ||
|
|
9041ccc2e1 | ||
|
|
c9f1d98f45 | ||
|
|
6a8e47bbb5 | ||
|
|
de50720834 | ||
|
|
216ec76640 | ||
|
|
08aaebec7e | ||
|
|
3e40f795da | ||
|
|
8dcebaaca6 | ||
|
|
a48809fc25 | ||
|
|
e94e75101f | ||
|
|
32f50cd254 |
19
.SRCINFO
Normal file
19
.SRCINFO
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
pkgbase = numa-git
|
||||||
|
pkgdesc = Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS
|
||||||
|
pkgver = 0.10.1.r0.g0000000
|
||||||
|
pkgrel = 1
|
||||||
|
url = https://github.com/razvandimescu/numa
|
||||||
|
arch = x86_64
|
||||||
|
license = MIT
|
||||||
|
options = !lto
|
||||||
|
makedepends = cargo
|
||||||
|
makedepends = git
|
||||||
|
depends = gcc-libs
|
||||||
|
depends = glibc
|
||||||
|
provides = numa
|
||||||
|
conflicts = numa
|
||||||
|
backup = etc/numa.toml
|
||||||
|
source = numa::git+https://github.com/razvandimescu/numa.git
|
||||||
|
sha256sums = SKIP
|
||||||
|
|
||||||
|
pkgname = numa-git
|
||||||
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@@ -27,6 +27,17 @@ jobs:
|
|||||||
- name: audit
|
- name: audit
|
||||||
run: cargo install cargo-audit && cargo audit
|
run: cargo install cargo-audit && cargo audit
|
||||||
|
|
||||||
|
check-macos:
|
||||||
|
runs-on: macos-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: dtolnay/rust-toolchain@stable
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
- name: clippy
|
||||||
|
run: cargo clippy -- -D warnings
|
||||||
|
- name: test
|
||||||
|
run: cargo test
|
||||||
|
|
||||||
check-windows:
|
check-windows:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
steps:
|
steps:
|
||||||
@@ -37,3 +48,10 @@ jobs:
|
|||||||
run: cargo build
|
run: cargo build
|
||||||
- name: clippy
|
- name: clippy
|
||||||
run: cargo clippy -- -D warnings
|
run: cargo clippy -- -D warnings
|
||||||
|
- name: test
|
||||||
|
run: cargo test
|
||||||
|
- name: Upload binary
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: numa-windows-x86_64
|
||||||
|
path: target/debug/numa.exe
|
||||||
|
|||||||
76
.github/workflows/homebrew-bump.yml
vendored
Normal file
76
.github/workflows/homebrew-bump.yml
vendored
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
name: Bump Homebrew Tap
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
version:
|
||||||
|
description: 'Version to bump (e.g. 0.10.0 or v0.10.0)'
|
||||||
|
required: true
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
bump:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Determine version
|
||||||
|
id: ver
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "release" ]; then
|
||||||
|
V="${{ github.event.release.tag_name }}"
|
||||||
|
else
|
||||||
|
V="${{ github.event.inputs.version }}"
|
||||||
|
fi
|
||||||
|
V="${V#v}"
|
||||||
|
echo "version=$V" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
|
- name: Fetch sha256 checksums from release assets
|
||||||
|
id: shas
|
||||||
|
env:
|
||||||
|
V: ${{ steps.ver.outputs.version }}
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
base="https://github.com/razvandimescu/numa/releases/download/v${V}"
|
||||||
|
for t in macos-aarch64 macos-x86_64 linux-aarch64 linux-x86_64; do
|
||||||
|
sha=$(curl -fsSL "${base}/numa-${t}.tar.gz.sha256" | awk '{print $1}')
|
||||||
|
if [ -z "$sha" ]; then
|
||||||
|
echo "ERROR: failed to fetch sha256 for $t" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
key=$(echo "$t" | tr '[:lower:]-' '[:upper:]_')
|
||||||
|
echo "SHA_${key}=${sha}" >> "$GITHUB_ENV"
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Clone homebrew-tap
|
||||||
|
env:
|
||||||
|
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
||||||
|
run: |
|
||||||
|
git clone "https://x-access-token:${HOMEBREW_TAP_GITHUB_TOKEN}@github.com/razvandimescu/homebrew-tap.git" tap
|
||||||
|
|
||||||
|
- name: Update formula
|
||||||
|
env:
|
||||||
|
VERSION: ${{ steps.ver.outputs.version }}
|
||||||
|
run: |
|
||||||
|
python3 scripts/update-homebrew-formula.py tap/numa.rb
|
||||||
|
echo "--- updated numa.rb ---"
|
||||||
|
cat tap/numa.rb
|
||||||
|
|
||||||
|
- name: Commit and push
|
||||||
|
working-directory: tap
|
||||||
|
env:
|
||||||
|
V: ${{ steps.ver.outputs.version }}
|
||||||
|
run: |
|
||||||
|
if git diff --quiet; then
|
||||||
|
echo "numa.rb already at v${V}, nothing to commit"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
git config user.name "github-actions[bot]"
|
||||||
|
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||||
|
git add numa.rb
|
||||||
|
git commit -m "chore: bump numa to v${V}"
|
||||||
|
git push origin main
|
||||||
159
.github/workflows/publish-aur.yml
vendored
Normal file
159
.github/workflows/publish-aur.yml
vendored
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
# `publish-aur.yml` - Arch Linux AUR Package Workflow
|
||||||
|
# --------------------
|
||||||
|
# This workflow automates the validation and publishing of the 'numa-git' package to the
|
||||||
|
# Arch User Repository (AUR). The AUR is a community-driven repository for Arch Linux users.
|
||||||
|
#
|
||||||
|
# Workflow Overview:
|
||||||
|
# 1. Validate: Builds and tests the package for Arch Linux x86_64 using a clean
|
||||||
|
# Arch Linux container.
|
||||||
|
# 2. Audit: Checks Rust dependencies for known security vulnerabilities using
|
||||||
|
# 'cargo-audit'.
|
||||||
|
# 3. Publish: If on the 'main' branch, it pushes the updated PKGBUILD and
|
||||||
|
# .SRCINFO to the AUR.
|
||||||
|
#
|
||||||
|
# Security Best Practices:
|
||||||
|
# - SHA Pinning: All GitHub Actions are pinned to a full-length commit SHA (e.g., v6.0.2 @ SHA)
|
||||||
|
# to ensure the code is immutable and protects against supply-chain attacks where a tag
|
||||||
|
# might be maliciously moved to a compromised commit.
|
||||||
|
# - SSH Hygiene: Uses ssh-agent to keep the private key in memory rather than on disk.
|
||||||
|
# - Audit: Runs 'cargo audit' to prevent publishing known vulnerable dependencies.
|
||||||
|
|
||||||
|
name: Publish - Arch Linux AUR Package
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [main]
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# The 'validate' job ensures that the PKGBUILD is correct and the software builds/tests
|
||||||
|
# successfully on Arch Linux before we attempt to publish it.
|
||||||
|
validate:
|
||||||
|
name: Validate PKGBUILD (${{ matrix.arch }})
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [x86_64]
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||||
|
|
||||||
|
- name: Build and Test Package
|
||||||
|
timeout-minutes: 60
|
||||||
|
env:
|
||||||
|
AUR_PKGNAME: ${{ secrets.AUR_PACKAGE_NAME }}
|
||||||
|
run: |
|
||||||
|
# We use a temporary directory to avoid Docker permission issues with the workspace.
|
||||||
|
mkdir -p build-dir
|
||||||
|
cp PKGBUILD build-dir/
|
||||||
|
|
||||||
|
docker run --rm -v $PWD/build-dir:/pkg -w /pkg archlinux:latest /bin/bash -c "
|
||||||
|
# ARCH LINUX SECURITY REQUIREMENT:
|
||||||
|
# 'makepkg' (the tool that builds Arch packages) refuses to run as root for safety.
|
||||||
|
# We must create a standard user and give them sudo access.
|
||||||
|
|
||||||
|
# Install build-time dependencies.
|
||||||
|
# 'base-devel' includes essential tools like gcc, make, and binutils.
|
||||||
|
# Install 'rust' directly to avoid the interactive virtual-package
|
||||||
|
# prompt for 'cargo' on current Arch images.
|
||||||
|
pacman -Syu --noconfirm --needed base-devel rust git sudo cargo-audit
|
||||||
|
|
||||||
|
useradd -m builduser
|
||||||
|
chown -R builduser:builduser /pkg
|
||||||
|
|
||||||
|
# Allow the build user to install dependencies during the build process.
|
||||||
|
echo 'builduser ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/builduser
|
||||||
|
|
||||||
|
# Fetch the source tree first so pkgver() and cargo-audit have a
|
||||||
|
# real Cargo.lock to inspect.
|
||||||
|
sudo -u builduser makepkg -o --nobuild --nocheck --nodeps --noprepare
|
||||||
|
|
||||||
|
# SECURITY AUDIT:
|
||||||
|
# Fail early if any dependencies have known security vulnerabilities.
|
||||||
|
sudo -u builduser sh -lc 'cd /pkg/src/numa && cargo audit'
|
||||||
|
|
||||||
|
# BUILD & TEST:
|
||||||
|
# 'makepkg -s' will:
|
||||||
|
# 1. Download source files (cloning this repo)
|
||||||
|
# 2. Run prepare(), build(), and check() (running cargo test)
|
||||||
|
# 3. Create the final .pkg.tar.zst package
|
||||||
|
sudo -u builduser makepkg -s --noconfirm
|
||||||
|
"
|
||||||
|
|
||||||
|
# The 'publish' job updates the AUR repository with our latest PKGBUILD and .SRCINFO.
|
||||||
|
publish:
|
||||||
|
name: Publish to AUR
|
||||||
|
needs: validate
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||||
|
|
||||||
|
# Securely configure SSH for AUR access.
|
||||||
|
- name: Configure SSH
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/.ssh
|
||||||
|
# Official AUR Ed25519 fingerprint (prevents Man-in-the-Middle attacks).
|
||||||
|
echo "aur.archlinux.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEuBKrPzbawxA/k2g6NcyV5jmqwJ2s+zpgZGZ7tpLIcN" >> ~/.ssh/known_hosts
|
||||||
|
|
||||||
|
# Use ssh-agent to keep the private key in memory rather than writing it to disk.
|
||||||
|
eval $(ssh-agent -s)
|
||||||
|
echo "${{ secrets.AUR_SSH_PRIVATE_KEY }}" | tr -d '\r' | ssh-add -
|
||||||
|
|
||||||
|
# Export the agent socket so subsequent 'git' commands can use it.
|
||||||
|
echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV
|
||||||
|
echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Push to AUR
|
||||||
|
env:
|
||||||
|
AUR_PKGNAME: ${{ secrets.AUR_PACKAGE_NAME }}
|
||||||
|
AUR_EMAIL: ${{ secrets.AUR_EMAIL }}
|
||||||
|
AUR_USER: ${{ secrets.AUR_USERNAME }}
|
||||||
|
run: |
|
||||||
|
# AUR repos are managed via Git. Each package has its own repo at:
|
||||||
|
# ssh://aur@aur.archlinux.org/<package-name>.git
|
||||||
|
git clone ssh://aur@aur.archlinux.org/$AUR_PKGNAME.git aur-repo
|
||||||
|
|
||||||
|
cp PKGBUILD aur-repo/
|
||||||
|
cd aur-repo
|
||||||
|
|
||||||
|
# METADATA GENERATION:
|
||||||
|
# '.SRCINFO' is a machine-readable version of the PKGBUILD.
|
||||||
|
# We must run this as a non-root user ('builduser') inside the container.
|
||||||
|
docker run --rm -v $(pwd):/pkg archlinux:latest /bin/bash -c "
|
||||||
|
pacman -Syu --noconfirm --needed binutils git sudo
|
||||||
|
useradd -m builduser
|
||||||
|
chown -R builduser:builduser /pkg
|
||||||
|
cd /pkg
|
||||||
|
sudo -u builduser git config --global --add safe.directory '*'
|
||||||
|
# makepkg -od fetches the source first so pkgver() can calculate the version.
|
||||||
|
# --noprepare skips the prepare() function, which invokes cargo and would
|
||||||
|
# otherwise require a full rust toolchain in this metadata-only container.
|
||||||
|
# pkgver() runs before prepare(), so .SRCINFO still gets the correct version.
|
||||||
|
sudo -u builduser makepkg -od --noprepare && sudo -u builduser makepkg --printsrcinfo > .SRCINFO
|
||||||
|
"
|
||||||
|
|
||||||
|
# Reclaim ownership: the in-container 'chown -R builduser:builduser /pkg'
|
||||||
|
# propagates through the bind mount, leaving .git/ owned by the container's
|
||||||
|
# builduser UID. Without this, subsequent 'git config' on the host fails with
|
||||||
|
# "could not lock config file .git/config: Permission denied".
|
||||||
|
sudo chown -R "$(id -u):$(id -g)" .
|
||||||
|
|
||||||
|
# Set the commit identity using secrets for security and auditability.
|
||||||
|
git config user.name "$AUR_USER"
|
||||||
|
git config user.email "$AUR_EMAIL"
|
||||||
|
|
||||||
|
# Stage and commit both the human-readable PKGBUILD and machine-readable .SRCINFO.
|
||||||
|
git add PKGBUILD .SRCINFO
|
||||||
|
|
||||||
|
if ! git diff --cached --quiet; then
|
||||||
|
git commit -m "chore: update PKGBUILD to ${{ github.sha }}"
|
||||||
|
git push origin master
|
||||||
|
else
|
||||||
|
echo "No changes to commit (metadata and PKGBUILD are already up-to-date)."
|
||||||
|
fi
|
||||||
8
.github/workflows/release.yml
vendored
8
.github/workflows/release.yml
vendored
@@ -103,6 +103,14 @@ jobs:
|
|||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
|
# Use a PAT (not the default GITHUB_TOKEN) so the resulting
|
||||||
|
# `release: published` event propagates to downstream workflows
|
||||||
|
# like homebrew-bump.yml. Events triggered by GITHUB_TOKEN are
|
||||||
|
# deliberately not propagated by GitHub Actions to prevent
|
||||||
|
# infinite loops; PAT-authored events are the documented escape
|
||||||
|
# hatch. Reusing HOMEBREW_TAP_GITHUB_TOKEN (already a PAT used
|
||||||
|
# by homebrew-bump.yml itself) keeps the secret surface flat.
|
||||||
|
token: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
||||||
generate_release_notes: true
|
generate_release_notes: true
|
||||||
files: |
|
files: |
|
||||||
*.tar.gz
|
*.tar.gz
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,4 +1,5 @@
|
|||||||
/target
|
/target
|
||||||
|
/build-dir
|
||||||
CLAUDE.md
|
CLAUDE.md
|
||||||
docs/
|
docs/
|
||||||
site/blog/posts/
|
site/blog/posts/
|
||||||
|
|||||||
12
Cargo.lock
generated
12
Cargo.lock
generated
@@ -1143,7 +1143,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "numa"
|
name = "numa"
|
||||||
version = "0.8.0"
|
version = "0.10.2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"axum",
|
"axum",
|
||||||
@@ -1159,6 +1159,7 @@ dependencies = [
|
|||||||
"reqwest",
|
"reqwest",
|
||||||
"ring",
|
"ring",
|
||||||
"rustls",
|
"rustls",
|
||||||
|
"rustls-pemfile",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
"socket2 0.5.10",
|
"socket2 0.5.10",
|
||||||
@@ -1546,6 +1547,15 @@ dependencies = [
|
|||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustls-pemfile"
|
||||||
|
version = "2.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
|
||||||
|
dependencies = [
|
||||||
|
"rustls-pki-types",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustls-pki-types"
|
name = "rustls-pki-types"
|
||||||
version = "1.14.0"
|
version = "1.14.0"
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "numa"
|
name = "numa"
|
||||||
version = "0.8.0"
|
version = "0.10.2"
|
||||||
authors = ["razvandimescu <razvan@dimescu.com>"]
|
authors = ["razvandimescu <razvan@dimescu.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||||
@@ -29,6 +29,7 @@ rustls = "0.23"
|
|||||||
tokio-rustls = "0.26"
|
tokio-rustls = "0.26"
|
||||||
arc-swap = "1"
|
arc-swap = "1"
|
||||||
ring = "0.17"
|
ring = "0.17"
|
||||||
|
rustls-pemfile = "2.2.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
criterion = { version = "0.5", features = ["html_reports"] }
|
criterion = { version = "0.5", features = ["html_reports"] }
|
||||||
|
|||||||
@@ -13,5 +13,5 @@ RUN cargo build --release
|
|||||||
|
|
||||||
FROM alpine:3.20
|
FROM alpine:3.20
|
||||||
COPY --from=builder /app/target/release/numa /usr/local/bin/numa
|
COPY --from=builder /app/target/release/numa /usr/local/bin/numa
|
||||||
EXPOSE 53/udp 80/tcp 443/tcp 5380/tcp
|
EXPOSE 53/udp 80/tcp 443/tcp 853/tcp 5380/tcp
|
||||||
ENTRYPOINT ["numa"]
|
ENTRYPOINT ["numa"]
|
||||||
|
|||||||
62
PKGBUILD
Normal file
62
PKGBUILD
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# Maintainer: razvandimescu <razvan@dimescu.com>
|
||||||
|
pkgname=numa-git
|
||||||
|
_pkgname=numa
|
||||||
|
pkgver=0.10.1.r0.g0000000 # Placeholder — pkgver() rewrites this on each makepkg run
|
||||||
|
pkgrel=1
|
||||||
|
pkgdesc="Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||||
|
arch=('x86_64')
|
||||||
|
url="https://github.com/razvandimescu/numa"
|
||||||
|
license=('MIT')
|
||||||
|
options=('!lto')
|
||||||
|
depends=('gcc-libs' 'glibc')
|
||||||
|
makedepends=('cargo' 'git')
|
||||||
|
provides=("$_pkgname")
|
||||||
|
conflicts=("$_pkgname")
|
||||||
|
backup=('etc/numa.toml')
|
||||||
|
source=("$_pkgname::git+$url.git")
|
||||||
|
sha256sums=('SKIP')
|
||||||
|
|
||||||
|
pkgver() {
|
||||||
|
cd "$srcdir/$_pkgname"
|
||||||
|
( set -o pipefail
|
||||||
|
git describe --long --tags 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
|
||||||
|
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
|
||||||
|
) | sed 's/^v//'
|
||||||
|
}
|
||||||
|
|
||||||
|
prepare() {
|
||||||
|
cd "$srcdir/$_pkgname"
|
||||||
|
# numa v0.10.1+ uses FHS-compliant paths on Linux by default
|
||||||
|
# (/var/lib/numa for data, journalctl for logs), so no source
|
||||||
|
# patching is needed. The earlier sed targeted /usr/local/bin/numa,
|
||||||
|
# which only appears in a comment in current main.
|
||||||
|
export RUSTUP_TOOLCHAIN=stable
|
||||||
|
cargo fetch --locked
|
||||||
|
}
|
||||||
|
|
||||||
|
build() {
|
||||||
|
cd "$srcdir/$_pkgname"
|
||||||
|
export RUSTUP_TOOLCHAIN=stable
|
||||||
|
cargo build --frozen --release
|
||||||
|
}
|
||||||
|
|
||||||
|
check() {
|
||||||
|
cd "$srcdir/$_pkgname"
|
||||||
|
export RUSTUP_TOOLCHAIN=stable
|
||||||
|
cargo test --frozen
|
||||||
|
}
|
||||||
|
|
||||||
|
package() {
|
||||||
|
cd "$srcdir/$_pkgname"
|
||||||
|
install -Dm755 "target/release/$_pkgname" "$pkgdir/usr/bin/$_pkgname"
|
||||||
|
|
||||||
|
# numa.service uses {{exe_path}} as a placeholder substituted by
|
||||||
|
# `numa install` at runtime via replace_exe_path(). For an AUR
|
||||||
|
# package install (no `numa install` step), we substitute it
|
||||||
|
# statically here so systemd gets a real ExecStart path.
|
||||||
|
sed 's|{{exe_path}}|/usr/bin/numa /etc/numa.toml|g' numa.service > numa.service.patched
|
||||||
|
install -Dm644 "numa.service.patched" "$pkgdir/usr/lib/systemd/system/numa.service"
|
||||||
|
|
||||||
|
install -Dm644 "numa.toml" "$pkgdir/etc/numa.toml"
|
||||||
|
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||||
|
}
|
||||||
48
README.md
48
README.md
@@ -8,23 +8,42 @@
|
|||||||
|
|
||||||
A portable DNS resolver in a single binary. Block ads on any network, name your local services (`frontend.numa`), and override any hostname with auto-revert — all from your laptop, no cloud account or Raspberry Pi required.
|
A portable DNS resolver in a single binary. Block ads on any network, name your local services (`frontend.numa`), and override any hostname with auto-revert — all from your laptop, no cloud account or Raspberry Pi required.
|
||||||
|
|
||||||
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand. Caching, ad blocking, and local service domains out of the box. Optional recursive resolution from root nameservers with full DNSSEC chain-of-trust validation. One ~8MB binary, everything embedded.
|
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand. Caching, ad blocking, and local service domains out of the box. Optional recursive resolution from root nameservers with full DNSSEC chain-of-trust validation, plus a DNS-over-TLS listener for encrypted client connections (iOS Private DNS, systemd-resolved, etc.). One ~8MB binary, everything embedded.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# macOS
|
||||||
brew install razvandimescu/tap/numa
|
brew install razvandimescu/tap/numa
|
||||||
# or: cargo install numa
|
|
||||||
# or: curl -fsSL https://raw.githubusercontent.com/razvandimescu/numa/main/install.sh | sh
|
|
||||||
|
|
||||||
sudo numa # port 53 requires root
|
# Linux
|
||||||
|
curl -fsSL https://raw.githubusercontent.com/razvandimescu/numa/main/install.sh | sh
|
||||||
|
|
||||||
|
# Arch Linux (AUR)
|
||||||
|
yay -S numa-git
|
||||||
|
|
||||||
|
# Windows — download from GitHub Releases
|
||||||
|
# All platforms
|
||||||
|
cargo install numa
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo numa # run in foreground (port 53 requires root/admin)
|
||||||
```
|
```
|
||||||
|
|
||||||
Open the dashboard: **http://numa.numa** (or `http://localhost:5380`)
|
Open the dashboard: **http://numa.numa** (or `http://localhost:5380`)
|
||||||
|
|
||||||
Set as system DNS: `sudo numa install`
|
Set as system DNS:
|
||||||
|
|
||||||
|
| Platform | Install | Uninstall |
|
||||||
|
|----------|---------|-----------|
|
||||||
|
| macOS | `sudo numa install` | `sudo numa uninstall` |
|
||||||
|
| Linux | `sudo numa install` | `sudo numa uninstall` |
|
||||||
|
| Windows | `numa install` (admin) + reboot | `numa uninstall` (admin) + reboot |
|
||||||
|
|
||||||
|
On macOS and Linux, numa runs as a system service (launchd/systemd). On Windows, numa auto-starts on login via registry.
|
||||||
|
|
||||||
## Local Services
|
## Local Services
|
||||||
|
|
||||||
@@ -43,7 +62,20 @@ Add path-based routing (`app.numa/api → :5001`), share services across machine
|
|||||||
|
|
||||||
385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network — coffee shops, hotels, airports. Travels with your laptop.
|
385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network — coffee shops, hotels, airports. Travels with your laptop.
|
||||||
|
|
||||||
By default, Numa forwards to your existing system DNS — everything works as before, just with caching and ad blocking on top. For full privacy, set `mode = "recursive"` — Numa resolves directly from root nameservers. No upstream dependency, no single entity sees your full query pattern. DNSSEC validates the full chain of trust: RRSIG signatures, DNSKEY verification, DS delegation, NSEC/NSEC3 denial proofs. [Read how it works →](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
Three resolution modes:
|
||||||
|
|
||||||
|
- **`forward`** (default) — transparent proxy to your existing system DNS. Everything works as before, just with caching and ad blocking on top. Captive portals, VPNs, corporate DNS — all respected.
|
||||||
|
- **`recursive`** — resolve directly from root nameservers. No upstream dependency, no single entity sees your full query pattern. Add `[dnssec] enabled = true` for full chain-of-trust validation.
|
||||||
|
- **`auto`** — probe root servers on startup, recursive if reachable, encrypted DoH fallback if blocked.
|
||||||
|
|
||||||
|
DNSSEC validates the full chain of trust: RRSIG signatures, DNSKEY verification, DS delegation, NSEC/NSEC3 denial proofs. [Read how it works →](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
||||||
|
|
||||||
|
**DNS-over-TLS listener** (RFC 7858) — accept encrypted queries on port 853 from strict clients like iOS Private DNS, systemd-resolved, or stubby. Two modes:
|
||||||
|
|
||||||
|
- **Self-signed** (default) — numa generates a local CA automatically. `numa install` adds it to the system trust store on macOS, Linux (Debian/Ubuntu, Fedora/RHEL/SUSE, Arch), and Windows. On iOS, install the `.mobileconfig` from `numa setup-phone`. Firefox keeps its own NSS store and ignores the system one — trust the CA there manually if you need HTTPS for `.numa` services in Firefox.
|
||||||
|
- **Bring-your-own cert** — point `[dot] cert_path` / `key_path` at a publicly-trusted cert (e.g., Let's Encrypt via DNS-01 challenge on a domain pointing at your numa instance). Clients connect without any trust-store setup — same UX as AdGuard Home or Cloudflare `1.1.1.1`.
|
||||||
|
|
||||||
|
ALPN `"dot"` is advertised and enforced in both modes; a handshake with mismatched ALPN is rejected as a cross-protocol confusion defense.
|
||||||
|
|
||||||
## LAN Discovery
|
## LAN Discovery
|
||||||
|
|
||||||
@@ -74,7 +106,8 @@ From Machine B: `curl http://api.numa` → proxied to Machine A's port 8000. Ena
|
|||||||
| Ad blocking | Yes | Yes | — | 385K+ domains |
|
| Ad blocking | Yes | Yes | — | 385K+ domains |
|
||||||
| Web admin UI | Full | Full | — | Dashboard |
|
| Web admin UI | Full | Full | — | Dashboard |
|
||||||
| Encrypted upstream (DoH) | Needs cloudflared | Yes | — | Native |
|
| Encrypted upstream (DoH) | Needs cloudflared | Yes | — | Native |
|
||||||
| Portable (laptop) | No (appliance) | No (appliance) | Server | Single binary |
|
| Encrypted clients (DoT listener) | Needs stunnel sidecar | Yes | Yes | Native (RFC 7858) |
|
||||||
|
| Portable (laptop) | No (appliance) | No (appliance) | Server | Single binary, macOS/Linux/Windows |
|
||||||
| Community maturity | 56K stars, 10 years | 33K stars | 20 years | New |
|
| Community maturity | 56K stars, 10 years | 33K stars | 20 years | New |
|
||||||
|
|
||||||
## Performance
|
## Performance
|
||||||
@@ -94,6 +127,7 @@ From Machine B: `curl http://api.numa` → proxied to Machine A's port 8000. Ena
|
|||||||
- [x] `.numa` local domains — auto TLS, path routing, WebSocket proxy
|
- [x] `.numa` local domains — auto TLS, path routing, WebSocket proxy
|
||||||
- [x] LAN service discovery — mDNS, cross-machine DNS + proxy
|
- [x] LAN service discovery — mDNS, cross-machine DNS + proxy
|
||||||
- [x] DNS-over-HTTPS — encrypted upstream
|
- [x] DNS-over-HTTPS — encrypted upstream
|
||||||
|
- [x] DNS-over-TLS listener — encrypted client connections (RFC 7858, ALPN strict)
|
||||||
- [x] Recursive resolution + DNSSEC — chain-of-trust, NSEC/NSEC3
|
- [x] Recursive resolution + DNSSEC — chain-of-trust, NSEC/NSEC3
|
||||||
- [x] SRTT-based nameserver selection
|
- [x] SRTT-based nameserver selection
|
||||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT
|
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
<string>com.numa.dns</string>
|
<string>com.numa.dns</string>
|
||||||
<key>ProgramArguments</key>
|
<key>ProgramArguments</key>
|
||||||
<array>
|
<array>
|
||||||
<string>/usr/local/bin/numa</string>
|
<string>{{exe_path}}</string>
|
||||||
</array>
|
</array>
|
||||||
<key>RunAtLoad</key>
|
<key>RunAtLoad</key>
|
||||||
<true/>
|
<true/>
|
||||||
|
|||||||
@@ -1,618 +0,0 @@
|
|||||||
# Launch Drafts
|
|
||||||
|
|
||||||
## Lessons Learned
|
|
||||||
|
|
||||||
**r/selfhosted** (0 upvotes, hostile) — "replaces Pi-hole" framing triggered
|
|
||||||
defensive comparisons. Audience protects their stack.
|
|
||||||
|
|
||||||
**r/programare** (26 upvotes, 22 comments, 12K views, 90.6% ratio) — worked
|
|
||||||
because it led with technical achievement. But: "what does this offer over
|
|
||||||
/etc/hosts?" and "mature solutions exist (dnsmasq, nginx)" were the top
|
|
||||||
objections. Tool-replacement angle falls flat with generalist audiences.
|
|
||||||
|
|
||||||
**r/webdev** — removed by moderators (self-promotion rules).
|
|
||||||
|
|
||||||
Key takeaways:
|
|
||||||
|
|
||||||
- Lead with what's *unique*, not what it *replaces*
|
|
||||||
- Write like explaining to a colleague, not marketing copy
|
|
||||||
- Pick ONE hook per community — don't try to be everything
|
|
||||||
- Triple-check the GitHub link works before posting
|
|
||||||
- Authentic tone > polished bullets
|
|
||||||
- Agree with "just use X" — then show what X can't do
|
|
||||||
- Don't oversell the pkarr/token vision — one sentence max
|
|
||||||
- Benchmark request from r/programare (Mydocalm) — warm follow-up content
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Launch Order
|
|
||||||
|
|
||||||
~~0. **r/programare** — done (2026-03-21). 12K views, 26 upvotes, 22 comments.~~
|
|
||||||
~~1. **r/webdev** — removed by moderators.~~
|
|
||||||
|
|
||||||
~~2. **r/degoogle** — done~~
|
|
||||||
~~3. **r/node** — done~~
|
|
||||||
|
|
||||||
4. **r/coolgithubprojects** — zero friction, just post the repo
|
|
||||||
~~5. **r/sideproject** — done (2026-03-29)~~
|
|
||||||
6. **r/dns** — technical DNS audience, recursive + DNSSEC angle
|
|
||||||
7. **Show HN** — Tuesday-Thursday, 9-10 AM ET
|
|
||||||
8. **r/rust** — same day as HN, technical deep-dive
|
|
||||||
9. **r/commandline** — 24h after HN
|
|
||||||
10. **r/selfhosted** — only if HN hits front page, lead with recursive + LAN discovery
|
|
||||||
11. **r/programare follow-up** — benchmark post + recursive/DNSSEC update
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Community Drafts
|
|
||||||
|
|
||||||
### Show HN
|
|
||||||
|
|
||||||
**Title (72 chars):**
|
|
||||||
Show HN: I built a DNS resolver from scratch in Rust – no DNS libraries
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I wanted to understand how DNS actually works at the wire level, so I built
|
|
||||||
a resolver from scratch. No DNS libraries — the RFC 1035 protocol (headers,
|
|
||||||
labels, compression pointers, record types) is all hand-parsed. It started
|
|
||||||
as a learning project and turned into something I use daily as my system DNS.
|
|
||||||
|
|
||||||
What it does today:
|
|
||||||
|
|
||||||
- **Forward mode by default** — transparent proxy to your existing DNS with
|
|
||||||
caching and ad blocking. Changes nothing about your network.
|
|
||||||
- **Full recursive resolver** — set `mode = "recursive"` and it resolves from
|
|
||||||
root nameservers. No upstream dependency. CNAME chasing, TLD priming, SRTT.
|
|
||||||
- **DNSSEC validation** — chain-of-trust verification from root KSK.
|
|
||||||
RSA/SHA-256, ECDSA P-256, Ed25519. Sets the AD bit on verified responses.
|
|
||||||
- **Ad blocking** — ~385K+ domains via Hagezi Pro, works on any network
|
|
||||||
- **DNS-over-HTTPS** — encrypted upstream (Quad9, Cloudflare, or any
|
|
||||||
provider) as an alternative to recursive mode
|
|
||||||
- **`.numa` local domains** — register `frontend.numa → localhost:5173` and
|
|
||||||
it creates both the DNS record and an HTTP/HTTPS reverse proxy with
|
|
||||||
auto-generated TLS certs. WebSocket passthrough works (Vite HMR).
|
|
||||||
- **LAN service discovery** — run Numa on two machines, they find each other
|
|
||||||
via UDP multicast. Zero config.
|
|
||||||
- **Developer overrides** — point any hostname to any IP, auto-reverts
|
|
||||||
after N minutes. REST API for scripting.
|
|
||||||
|
|
||||||
Single binary, macOS + Linux. `sudo numa install` and it's your system DNS —
|
|
||||||
forward mode by default, recursive when you're ready.
|
|
||||||
|
|
||||||
The interesting technical bits: the recursive resolver walks root → TLD →
|
|
||||||
authoritative with iterative queries, caching NS/DS/DNSKEY records at each
|
|
||||||
hop. DNSSEC validation verifies RRSIG signatures against DNSKEY, walks the
|
|
||||||
chain via DS records up to the hardcoded root trust anchor. ECDSA P-256
|
|
||||||
verification takes 174ns (benchmarked with criterion). Cold-cache validation
|
|
||||||
for a new domain is ~90ms, with only 1 network fetch needed (TLD chain is
|
|
||||||
pre-warmed on startup). SRTT-based nameserver selection learns which
|
|
||||||
servers respond fastest — average recursive query drops from 2.8s to
|
|
||||||
237ms after warmup (12x).
|
|
||||||
|
|
||||||
It also handles hostile networks: if your ISP blocks UDP port 53,
|
|
||||||
Numa detects this after 3 failures and switches all
|
|
||||||
queries to TCP automatically. Resets when you change networks. RFC 7816
|
|
||||||
query minimization means root servers only see the TLD, not your full
|
|
||||||
query.
|
|
||||||
|
|
||||||
The DNS cache adjusts TTLs on read (remaining time, not original). Each
|
|
||||||
query is an async tokio task. EDNS0 with DO bit and 1232-byte payload
|
|
||||||
(DNS Flag Day 2020).
|
|
||||||
|
|
||||||
Longer term I want to add pkarr/DHT resolution for self-sovereign DNS,
|
|
||||||
but that's future work.
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/rust
|
|
||||||
|
|
||||||
**Title:** I built a recursive DNS resolver from scratch in Rust — DNSSEC, no DNS libraries
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I've been building a DNS resolver in Rust as a learning project that became
|
|
||||||
my daily driver. The entire DNS wire protocol is implemented by hand —
|
|
||||||
no `trust-dns`, no `hickory-dns`, no `simple-dns`. Headers, label sequences,
|
|
||||||
compression pointers, EDNS, all of it.
|
|
||||||
|
|
||||||
Some things I found interesting while building this:
|
|
||||||
|
|
||||||
**Recursive resolution** — iterative queries from root hints, walking
|
|
||||||
root → TLD → authoritative. CNAME chasing, A+AAAA glue extraction from
|
|
||||||
additional sections, referral depth limits. TLD priming pre-warms NS + DS +
|
|
||||||
DNSKEY for 34 gTLDs + EU ccTLDs on startup.
|
|
||||||
|
|
||||||
**DNSSEC chain-of-trust** — the most involved part. Verify RRSIG signatures
|
|
||||||
against DNSKEY, walk DS records up to the hardcoded root KSK (key tag 20326).
|
|
||||||
Uses `ring` for crypto: RSA/SHA-256, ECDSA P-256 (174ns per verify), Ed25519.
|
|
||||||
RFC 3110 RSA keys need converting to PKCS#1 DER for ring — wrote an ASN.1
|
|
||||||
encoder for that. RRSIG time validity checks per RFC 4035 §5.3.1.
|
|
||||||
|
|
||||||
**NSEC/NSEC3 denial proofs** — proving a name *doesn't* exist is harder than
|
|
||||||
proving it does. NSEC uses canonical DNS name ordering to prove gap coverage.
|
|
||||||
NSEC3 uses iterated SHA-1 hashing + base32hex + a 3-part closest encloser
|
|
||||||
proof (RFC 5155 §8.4). Both require authority-section RRSIG verification.
|
|
||||||
|
|
||||||
**Wire protocol parsing** — DNS uses a binary format with label compression
|
|
||||||
(pointers back into the packet via 2-byte offsets). Parsing this correctly
|
|
||||||
is surprisingly tricky because pointers can chain. I use a `BytePacketBuffer`
|
|
||||||
that tracks position and handles jumps.
|
|
||||||
|
|
||||||
**Performance** — TLD chain pre-warming means cold-cache DNSSEC validation
|
|
||||||
needs ~1 DNSKEY fetch (down from 5). Referral DS piggybacking caches DS
|
|
||||||
from authority sections during resolution. ECDSA P-256 verify: 174ns.
|
|
||||||
RSA/SHA-256: 10.9µs. DS verify: 257ns.
|
|
||||||
|
|
||||||
**LAN service discovery** — Numa instances on the same network find each
|
|
||||||
other via UDP multicast. The tricky part was self-filtering: I initially
|
|
||||||
filtered by IP, but two instances on the same host share an IP. Switched to
|
|
||||||
a per-process instance ID (`pid ^ nanos`).
|
|
||||||
|
|
||||||
**Auto TLS** — generates a local CA + per-service certs using `rcgen`.
|
|
||||||
`numa install` trusts the CA in the OS keychain. HTTPS proxy via `rustls` +
|
|
||||||
`tokio-rustls`.
|
|
||||||
|
|
||||||
Single binary, no runtime dependencies. Uses `tokio`, `axum` (REST
|
|
||||||
API/dashboard), `hyper` (reverse proxy), `ring` (DNSSEC crypto), `reqwest`
|
|
||||||
(DoH), `socket2` (multicast), `rcgen` + `rustls` (TLS).
|
|
||||||
|
|
||||||
Happy to discuss any of the implementation decisions.
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/degoogle
|
|
||||||
|
|
||||||
**Title:** I replaced cloud DNS with a recursive resolver — resolves from root, no upstream, DNSSEC
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I wanted a DNS setup with zero cloud dependency. No NextDNS account,
|
|
||||||
no Cloudflare dashboard, no Pi-hole appliance, no upstream resolver seeing
|
|
||||||
my queries. Just a single binary on my laptop that resolves everything
|
|
||||||
itself.
|
|
||||||
|
|
||||||
Built one in Rust. What it does:
|
|
||||||
|
|
||||||
- **Forward mode by default** — transparent proxy to your existing DNS with
|
|
||||||
caching and ad blocking. Changes nothing about your network.
|
|
||||||
- **Recursive resolution** — set `mode = "recursive"` and it resolves directly
|
|
||||||
from root nameservers. No Quad9, no Cloudflare, no upstream dependency.
|
|
||||||
Each authoritative server only sees the query for its zone — no single
|
|
||||||
entity sees your full browsing pattern.
|
|
||||||
- **DNSSEC validation** — verifies the chain of trust from root KSK.
|
|
||||||
Responses are cryptographically verified — no one can tamper with them
|
|
||||||
in transit.
|
|
||||||
- **System-level ad blocking** — Hagezi Pro list (~385K+ domains),
|
|
||||||
works on any network. Coffee shop WiFi, airport, hotel.
|
|
||||||
- **ISP-resistant** — in recursive mode, if UDP is blocked Numa switches
|
|
||||||
to TCP automatically. Or set `mode = "auto"` to probe on startup and
|
|
||||||
fall back to encrypted DoH if needed.
|
|
||||||
- **Query minimization** — root servers only see the TLD (.com), not
|
|
||||||
your full domain. RFC 7816.
|
|
||||||
- **Zero telemetry, zero cloud** — all data stays on your machine. No
|
|
||||||
account, no login, no analytics. Config is a single TOML file.
|
|
||||||
- **Local service naming** — bonus for developers: `https://app.numa`
|
|
||||||
instead of `localhost:3000`, with auto-generated TLS certs
|
|
||||||
|
|
||||||
Single binary, macOS + Linux. `sudo numa install` and it's your system
|
|
||||||
DNS — forward mode by default, recursive when you're ready. No Docker,
|
|
||||||
no PHP, no external dependencies.
|
|
||||||
|
|
||||||
The DNS wire protocol is parsed from scratch — no DNS libraries. You can
|
|
||||||
read every line of code.
|
|
||||||
|
|
||||||
```
|
|
||||||
brew install razvandimescu/tap/numa
|
|
||||||
# or
|
|
||||||
cargo install numa
|
|
||||||
```
|
|
||||||
|
|
||||||
MIT license. https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/node
|
|
||||||
|
|
||||||
**Title:** I replaced localhost:5173 with frontend.numa — auto HTTPS, HMR works, no nginx
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
Running a Vite frontend on :5173, Express API on :3000, maybe docs on
|
|
||||||
:4000 — I could never remember which port was which. And CORS between
|
|
||||||
`localhost:5173` and `localhost:3000` is its own special hell.
|
|
||||||
|
|
||||||
How do you get named domains with HTTPS locally?
|
|
||||||
|
|
||||||
1. /etc/hosts + mkcert + nginx
|
|
||||||
2. dnsmasq + mkcert + Caddy
|
|
||||||
3. `sudo numa`
|
|
||||||
|
|
||||||
What it actually does:
|
|
||||||
|
|
||||||
```
|
|
||||||
curl -X POST localhost:5380/services \
|
|
||||||
-d '{"name":"frontend","target_port":5173}'
|
|
||||||
```
|
|
||||||
|
|
||||||
Now `https://frontend.numa` works in my browser. Green lock, valid cert.
|
|
||||||
|
|
||||||
- **HMR works** — Vite, webpack, socket.io all pass through the proxy.
|
|
||||||
No special config.
|
|
||||||
- **CORS solved** — `frontend.numa` and `api.numa` share the `.numa`
|
|
||||||
cookie domain. Cross-service auth just works.
|
|
||||||
- **Path routing** — `app.numa/api → :3000`, `app.numa/auth → :3001`.
|
|
||||||
Like nginx location blocks, zero config files.
|
|
||||||
|
|
||||||
No mkcert, no nginx.conf, no Caddyfile, no editing /etc/hosts.
|
|
||||||
Single binary, one command.
|
|
||||||
|
|
||||||
```
|
|
||||||
brew install razvandimescu/tap/numa
|
|
||||||
# or
|
|
||||||
cargo install numa
|
|
||||||
```
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/dns
|
|
||||||
|
|
||||||
**Title:** Numa — recursive DNS resolver from scratch in Rust, DNSSEC, no DNS libraries
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I built a recursive DNS resolver where the entire wire protocol (RFC 1035 —
|
|
||||||
headers, label compression, EDNS0) is hand-parsed. No `hickory-dns`,
|
|
||||||
no `trust-dns`.
|
|
||||||
|
|
||||||
What it does:
|
|
||||||
- Full recursive resolver from root hints (iterative queries, no upstream needed)
|
|
||||||
- DNSSEC chain-of-trust validation (RSA/SHA-256, ECDSA P-256, Ed25519)
|
|
||||||
- EDNS0 with DO bit, 1232-byte payload (DNS Flag Day 2020 compliant)
|
|
||||||
- DNS-over-HTTPS as an alternative upstream mode
|
|
||||||
- Ad blocking (~385K+ domains via Hagezi Pro)
|
|
||||||
- Conditional forwarding (auto-detects Tailscale/VPN split-DNS)
|
|
||||||
- Local zones, ephemeral overrides with auto-revert via REST API
|
|
||||||
|
|
||||||
DNSSEC implementation: DNSKEY/DS/RRSIG record parsing, canonical wire format
|
|
||||||
for signed data, key tag computation (RFC 4034), DS digest verification.
|
|
||||||
Chain walks from zone → TLD → root trust anchor. ECDSA P-256 signature
|
|
||||||
verification in 174ns. TLD chain pre-warmed on startup. Referral DS records
|
|
||||||
piggybacked from authority sections during resolution.
|
|
||||||
|
|
||||||
NSEC/NSEC3 authenticated denial of existence: NXDOMAIN gap proofs, NSEC3
|
|
||||||
closest encloser proofs (3-part per RFC 5155), NODATA type absence proofs,
|
|
||||||
authority-section RRSIG verification. Iteration cap at 500 for NSEC3 DoS
|
|
||||||
prevention.
|
|
||||||
|
|
||||||
What it doesn't do (yet): no authoritative zone serving (AXFR/NOTIFY).
|
|
||||||
|
|
||||||
Single binary, macOS + Linux. MIT license.
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Lobsters (invite-only)
|
|
||||||
|
|
||||||
**Title:** Numa — DNS resolver from scratch in Rust, no DNS libraries
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I built a DNS resolver in Rust — RFC 1035 wire protocol parsed by hand,
|
|
||||||
no `trust-dns` or `hickory-dns`. Started as a learning project, became
|
|
||||||
my daily system DNS.
|
|
||||||
|
|
||||||
Beyond resolving, it does local `.numa` domains with auto HTTPS reverse
|
|
||||||
proxy (register `frontend.numa → localhost:5173`, get a green lock and
|
|
||||||
WebSocket passthrough), and LAN service discovery via UDP multicast —
|
|
||||||
two machines running Numa find each other's services automatically.
|
|
||||||
|
|
||||||
Implementation bits I found interesting: DNS label compression (chained
|
|
||||||
2-byte pointers back into the packet), browsers rejecting wildcard certs
|
|
||||||
under single-label TLDs (`*.numa` fails — need per-service SANs), and
|
|
||||||
`SO_REUSEPORT` on macOS for multiple processes binding the same multicast
|
|
||||||
port.
|
|
||||||
|
|
||||||
Set `mode = "recursive"` for DNSSEC-validated resolution from root
|
|
||||||
nameservers — no upstream, no middleman.
|
|
||||||
|
|
||||||
Single binary, macOS + Linux.
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/coolgithubprojects
|
|
||||||
|
|
||||||
**Post type:** Image post with `hero-demo.gif`, GitHub link in first comment.
|
|
||||||
|
|
||||||
**Title:** Numa — portable DNS resolver built from scratch in Rust. Ad blocking, local HTTPS domains, LAN discovery, recursive resolution with DNSSEC. Single binary.
|
|
||||||
|
|
||||||
**First comment (post immediately):**
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
```
|
|
||||||
brew install razvandimescu/tap/numa && sudo numa
|
|
||||||
```
|
|
||||||
|
|
||||||
No DNS libraries — RFC 1035 wire protocol parsed by hand.
|
|
||||||
Recursive resolution from root nameservers with full DNSSEC
|
|
||||||
chain-of-trust validation. 385K+ blocked ad domains.
|
|
||||||
.numa local domains with auto TLS and WebSocket proxy.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/sideproject
|
|
||||||
|
|
||||||
**Title:** I built a DNS resolver from scratch in Rust — it's now my daily system DNS
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
Last year I wanted to understand how DNS actually works at the wire
|
|
||||||
level, so I started parsing RFC 1035 packets by hand. No DNS libraries,
|
|
||||||
no trust-dns, no hickory-dns — just bytes and the spec.
|
|
||||||
|
|
||||||
It turned into something I use every day. What it does now:
|
|
||||||
|
|
||||||
- **Ad blocking** on any network (coffee shops, airports) — 385K+
|
|
||||||
domains blocked, travels with my laptop
|
|
||||||
- **Local service naming** — `https://frontend.numa` instead of
|
|
||||||
`localhost:5173`, with auto-generated TLS certs and WebSocket
|
|
||||||
passthrough for HMR
|
|
||||||
- **Recursive resolution** from root nameservers with DNSSEC
|
|
||||||
chain-of-trust validation — set `mode = "recursive"` for full
|
|
||||||
privacy, no upstream dependency, no single entity sees my query
|
|
||||||
pattern
|
|
||||||
- **LAN discovery** — two machines running Numa find each other's
|
|
||||||
services automatically via mDNS
|
|
||||||
|
|
||||||
Single Rust binary, ~8MB, MIT license. `sudo numa install` and it's your
|
|
||||||
system DNS — caching, ad blocking, .numa domains, zero config changes.
|
|
||||||
|
|
||||||
I wrote about the technical journey here:
|
|
||||||
- [I Built a DNS Resolver from Scratch](https://numa.rs/blog/posts/dns-from-scratch.html)
|
|
||||||
- [Implementing DNSSEC from Scratch](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/webdev (Showoff Saturday — posted 2026-03-28)
|
|
||||||
|
|
||||||
**Title:** I replaced localhost:5173 with frontend.numa — shared cookie domain, auto HTTPS, no nginx
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
The port numbers weren't the real problem. It was CORS between
|
|
||||||
`localhost:5173` and `localhost:3000`, Secure cookies not setting over
|
|
||||||
HTTP, and service workers requiring a secure context.
|
|
||||||
|
|
||||||
I built a DNS resolver that gives local services named domains under a
|
|
||||||
shared TLD:
|
|
||||||
|
|
||||||
```
|
|
||||||
curl -X POST localhost:5380/services \
|
|
||||||
-d '{"name":"frontend","target_port":5173}'
|
|
||||||
```
|
|
||||||
|
|
||||||
Now `https://frontend.numa` and `https://api.numa` share the `.numa`
|
|
||||||
cookie domain. Cross-service auth just works. Secure cookies set.
|
|
||||||
Service workers run.
|
|
||||||
|
|
||||||
What's under the hood:
|
|
||||||
- **Auto HTTPS** — generates a local CA + per-service TLS certs. Green
|
|
||||||
lock, no mkcert.
|
|
||||||
- **WebSocket passthrough** — Vite/webpack HMR goes through the proxy.
|
|
||||||
No special config.
|
|
||||||
- **Path routing** — `app.numa/api → :3000`, `app.numa/auth → :3001`.
|
|
||||||
Like nginx location blocks.
|
|
||||||
- **Also a full DNS resolver** — forward mode with caching and ad
|
|
||||||
blocking by default. Set `mode = "recursive"` for full DNSSEC-validated
|
|
||||||
resolution from root nameservers.
|
|
||||||
|
|
||||||
Single Rust binary. `sudo numa install` and it's your system DNS — caching,
|
|
||||||
ad blocking, .numa domains. No nginx, no Caddy, no /etc/hosts.
|
|
||||||
|
|
||||||
```
|
|
||||||
brew install razvandimescu/tap/numa
|
|
||||||
# or
|
|
||||||
cargo install numa
|
|
||||||
```
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
**Lessons from r/node (2026-03-24):** "Can't remember 3 ports?" got
|
|
||||||
pushback — the CORS/cookie angle resonated more. Lead with what you
|
|
||||||
can't do without it, not what's annoying.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/commandline
|
|
||||||
|
|
||||||
**Title:** numa — local dev DNS with auto HTTPS and LAN service discovery, single Rust binary
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I run 5-6 local services and wanted named domains with HTTPS instead of
|
|
||||||
remembering port numbers. Built a DNS resolver that handles `.numa`
|
|
||||||
domains:
|
|
||||||
|
|
||||||
```
|
|
||||||
curl -X POST localhost:5380/services \
|
|
||||||
-d '{"name":"api","target_port":8000}'
|
|
||||||
```
|
|
||||||
|
|
||||||
Now `https://api.numa` resolves, proxies to localhost:8000, and has a
|
|
||||||
valid TLS cert. WebSocket passthrough works — Vite HMR goes through
|
|
||||||
the proxy fine.
|
|
||||||
|
|
||||||
The part I didn't expect to be useful: LAN service discovery. Two
|
|
||||||
machines running numa find each other via UDP multicast. I register
|
|
||||||
`api.numa` on my laptop, my teammate's numa instance picks it up
|
|
||||||
automatically. Zero config.
|
|
||||||
|
|
||||||
Also blocks ~385K+ ad domains since it's already your DNS resolver.
|
|
||||||
Portable — works on any network (coffee shops, airports). Set
|
|
||||||
`mode = "recursive"` for full DNSSEC-validated resolution from root
|
|
||||||
nameservers — no upstream dependency.
|
|
||||||
|
|
||||||
```
|
|
||||||
brew install razvandimescu/tap/numa
|
|
||||||
sudo numa
|
|
||||||
```
|
|
||||||
|
|
||||||
Single binary, DNS wire protocol parsed from scratch (no DNS libraries).
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### r/selfhosted (only if Show HN hits front page)
|
|
||||||
|
|
||||||
**Title:** Numa — recursive resolver + ad blocking + LAN service discovery in one binary
|
|
||||||
|
|
||||||
**Body:**
|
|
||||||
|
|
||||||
I built a DNS resolver in Rust that I've been running as my system DNS.
|
|
||||||
Two features I'm most proud of:
|
|
||||||
|
|
||||||
**Recursive resolution + DNSSEC** — set `mode = "recursive"` and it resolves
|
|
||||||
from root nameservers, no upstream dependency. Chain-of-trust verification
|
|
||||||
(RSA, ECDSA, Ed25519), NSEC/NSEC3 denial proofs. No single entity sees your
|
|
||||||
full query pattern — each authoritative server only sees its zone's queries.
|
|
||||||
|
|
||||||
**LAN service discovery** — I register `api.numa → localhost:8000` on my
|
|
||||||
laptop. My colleague's machine, also running Numa, picks it up via UDP
|
|
||||||
multicast — `api.numa` resolves to my IP on his machine. Zero config.
|
|
||||||
|
|
||||||
The rest of what it does:
|
|
||||||
- **Ad blocking** — ~385K+ domains (Hagezi Pro), portable. Works on any
|
|
||||||
network including coffee shops and airports.
|
|
||||||
- **DNS-over-HTTPS** — encrypted upstream as an alternative to recursive mode.
|
|
||||||
- **Auto HTTPS for local services** — generates a local CA + per-service
|
|
||||||
TLS certs. `https://frontend.numa` with a green lock, WebSocket passthrough.
|
|
||||||
- **Hub mode** — point other devices' DNS to it, they get ad blocking +
|
|
||||||
`.numa` resolution without installing anything.
|
|
||||||
|
|
||||||
Replaces Pi-hole + Unbound in one binary. No Raspberry Pi, no Docker, no PHP.
|
|
||||||
|
|
||||||
Single binary, macOS + Linux. Config is one optional TOML file.
|
|
||||||
|
|
||||||
**What it doesn't do (yet):** No web-based config editor (TOML + REST API).
|
|
||||||
DoT listener is in progress.
|
|
||||||
|
|
||||||
`brew install razvandimescu/tap/numa` or `cargo install numa`
|
|
||||||
|
|
||||||
https://github.com/razvandimescu/numa
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Preparation Checklist
|
|
||||||
|
|
||||||
- [ ] Verify GitHub repo is PUBLIC before any post
|
|
||||||
- [ ] Build some comment history on posting account first
|
|
||||||
- [ ] Post HN Tuesday-Thursday, 9-10 AM Eastern
|
|
||||||
- [ ] Respond to every comment within 2 hours for the first 6 hours
|
|
||||||
- [ ] Have fixes ready to ship within 24h for reported issues
|
|
||||||
- [ ] Don't oversell the pkarr/token vision — one sentence max
|
|
||||||
|
|
||||||
## Rules
|
|
||||||
|
|
||||||
- Verify GitHub repo is PUBLIC before every post
|
|
||||||
- Use an account with comment history, not a fresh one
|
|
||||||
- Respond to every comment within 2 hours
|
|
||||||
- Never be defensive — acknowledge valid criticism, redirect
|
|
||||||
- If someone says "just use X" — agree it works, explain what's *uniquely different*
|
|
||||||
- Lead with unique capabilities, not tool replacement
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Prepared Responses
|
|
||||||
|
|
||||||
**"What does this offer over /etc/hosts?"** *(actual r/programare objection)*
|
|
||||||
/etc/hosts is static and per-machine. Numa gives you: auto-revert after N
|
|
||||||
minutes (great for testing), a REST API so scripts can create/remove entries,
|
|
||||||
HTTPS reverse proxy with auto TLS, and LAN discovery so you don't have to
|
|
||||||
edit hosts on every device. Different tools for different problems.
|
|
||||||
|
|
||||||
**"Mature solutions already exist (dnsmasq, nginx, etc.)"** *(actual r/programare objection)*
|
|
||||||
Absolutely — and they're great. The thing they don't do: register a service
|
|
||||||
on machine A and have it automatically appear on machine B via multicast.
|
|
||||||
Numa integrates DNS + reverse proxy + TLS + discovery into one binary so
|
|
||||||
those pieces work together. If you only need DNS forwarding, dnsmasq is the
|
|
||||||
right tool.
|
|
||||||
|
|
||||||
**"Why not Pi-hole / AdGuard Home?"**
|
|
||||||
They're network appliances — need dedicated hardware or Docker. Numa is a
|
|
||||||
single binary on your laptop. When you move to a coffee shop, your ad
|
|
||||||
blocking comes with you. Plus the reverse proxy + LAN discovery.
|
|
||||||
|
|
||||||
**"Why from scratch / no DNS libraries?"**
|
|
||||||
Started as a learning project to understand the wire protocol. Turned out
|
|
||||||
having full control over the pipeline makes features like conditional
|
|
||||||
forwarding and override injection trivial — they're just steps in the
|
|
||||||
resolution chain.
|
|
||||||
|
|
||||||
**"Vibe coded / AI generated?"**
|
|
||||||
I use AI as a coding partner — same as using Stack Overflow or pair
|
|
||||||
programming. I make the architecture decisions, direct what gets built,
|
|
||||||
and review everything. The DNS wire protocol parser was the original
|
|
||||||
learning project I wrote by hand. Later features were built collaboratively
|
|
||||||
with AI assistance. You can read every line — nothing is opaque generated
|
|
||||||
slop.
|
|
||||||
|
|
||||||
**"Why sudo / why port 53?"**
|
|
||||||
Port 53 requires root on Unix. Numa only needs it for the UDP socket.
|
|
||||||
You can also bind to a high port for testing: `bind_addr = "127.0.0.1:5353"`.
|
|
||||||
|
|
||||||
**"What about .numa TLD conflicts?"**
|
|
||||||
The TLD is configurable in `numa.toml`. If `.numa` ever becomes official,
|
|
||||||
change it to anything else.
|
|
||||||
|
|
||||||
**"Does it support DoH/DoT?"**
|
|
||||||
DoH is built in — set `address = "https://9.9.9.9/dns-query"` in
|
|
||||||
`[upstream]` and your queries are encrypted. Or set `mode = "auto"` to
|
|
||||||
probe root servers and fall back to DoH if blocked. DoT listener support
|
|
||||||
is in progress (PR #25).
|
|
||||||
|
|
||||||
**"But Quad9/Cloudflare still sees my queries"**
|
|
||||||
In forward mode (the default), yes — your upstream resolver sees your queries.
|
|
||||||
Set `mode = "recursive"` and Numa resolves directly from root nameservers —
|
|
||||||
no single upstream sees your full query pattern. Each authoritative server
|
|
||||||
only sees the query relevant to its zone. Add `[dnssec] enabled = true` to
|
|
||||||
cryptographically verify responses.
|
|
||||||
|
|
||||||
**"Show me benchmarks / performance numbers"** *(actual r/programare request)*
|
|
||||||
Benchmark suite is in `benches/` (criterion). Cached round-trip: 691ns.
|
|
||||||
Pipeline throughput: ~2.0M qps. DNSSEC: ECDSA P-256 verify 174ns, RSA/SHA-256
|
|
||||||
10.9µs, DS verify 257ns. Cold-cache DNSSEC validation ~90ms (1 network fetch,
|
|
||||||
TLD chain pre-warmed). Full comparison against system resolver, Quad9,
|
|
||||||
Cloudflare, Google on the site.
|
|
||||||
|
|
||||||
**"Why not just use Unbound?"**
|
|
||||||
Numa supports recursive resolution with DNSSEC validation, same as Unbound
|
|
||||||
(`mode = "recursive"`). The difference:
|
|
||||||
Numa also has built-in ad blocking, a dashboard, `.numa` local domains with
|
|
||||||
auto HTTPS, LAN service discovery, and developer overrides. Unbound does
|
|
||||||
one thing well; Numa integrates six features into one binary.
|
|
||||||
|
|
||||||
**"Why not Technitium?"**
|
|
||||||
Technitium is the closest in features — recursive, DNSSEC, ad blocking,
|
|
||||||
dashboard. Good tool. Two differences: (1) Numa is a single static binary,
|
|
||||||
Technitium requires the .NET runtime; (2) Numa has developer tooling that
|
|
||||||
Technitium doesn't — `.numa` local domains with auto TLS reverse proxy,
|
|
||||||
path-based routing, LAN service discovery, ephemeral overrides with
|
|
||||||
auto-revert. Different audiences: Technitium targets server admins, Numa
|
|
||||||
targets developers on laptops.
|
|
||||||
|
|
||||||
**"Does it support Windows?"**
|
|
||||||
macOS and Linux are the primary targets. Windows has scaffolding in the code
|
|
||||||
but is not tested. If there's demand, it's on the list.
|
|
||||||
@@ -5,7 +5,7 @@ Wants=network-online.target
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=simple
|
||||||
ExecStart=/usr/local/bin/numa
|
ExecStart={{exe_path}}
|
||||||
Restart=always
|
Restart=always
|
||||||
RestartSec=2
|
RestartSec=2
|
||||||
StandardOutput=journal
|
StandardOutput=journal
|
||||||
|
|||||||
14
numa.toml
14
numa.toml
@@ -2,6 +2,12 @@
|
|||||||
bind_addr = "0.0.0.0:53"
|
bind_addr = "0.0.0.0:53"
|
||||||
api_port = 5380
|
api_port = 5380
|
||||||
# api_bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN dashboard access
|
# api_bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN dashboard access
|
||||||
|
# data_dir = "/var/lib/numa" # where numa stores TLS CA and cert material
|
||||||
|
# Defaults: /var/lib/numa on linux (FHS),
|
||||||
|
# /usr/local/var/numa on macos (homebrew prefix),
|
||||||
|
# %PROGRAMDATA%\numa on windows. Override for
|
||||||
|
# containerized deploys or tests that can't
|
||||||
|
# write to the system path.
|
||||||
|
|
||||||
# [upstream]
|
# [upstream]
|
||||||
# mode = "forward" # "forward" (default) — relay to upstream
|
# mode = "forward" # "forward" (default) — relay to upstream
|
||||||
@@ -83,6 +89,14 @@ tld = "numa"
|
|||||||
# enabled = false # opt-in: verify chain of trust from root KSK
|
# enabled = false # opt-in: verify chain of trust from root KSK
|
||||||
# strict = false # true = SERVFAIL on bogus signatures
|
# strict = false # true = SERVFAIL on bogus signatures
|
||||||
|
|
||||||
|
# DNS-over-TLS listener (RFC 7858) — encrypted DNS on port 853
|
||||||
|
# [dot]
|
||||||
|
# enabled = false # opt-in: accept DoT queries
|
||||||
|
# port = 853 # standard DoT port
|
||||||
|
# bind_addr = "0.0.0.0" # IPv4 or IPv6; unspecified binds all interfaces
|
||||||
|
# cert_path = "/etc/numa/dot.crt" # PEM cert; omit to use self-signed (proxy CA if available)
|
||||||
|
# key_path = "/etc/numa/dot.key" # PEM private key; must be set together with cert_path
|
||||||
|
|
||||||
# LAN service discovery via mDNS (disabled by default — no network traffic unless enabled)
|
# LAN service discovery via mDNS (disabled by default — no network traffic unless enabled)
|
||||||
# [lan]
|
# [lan]
|
||||||
# enabled = true # discover other Numa instances via mDNS (_numa._tcp.local)
|
# enabled = true # discover other Numa instances via mDNS (_numa._tcp.local)
|
||||||
|
|||||||
57
scripts/update-homebrew-formula.py
Executable file
57
scripts/update-homebrew-formula.py
Executable file
@@ -0,0 +1,57 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Rewrite a Homebrew formula in place: bump version, URL paths, and sha256 lines.
|
||||||
|
|
||||||
|
Reads the formula path from argv[1], and the following env vars:
|
||||||
|
VERSION e.g. "0.10.0" (no leading v)
|
||||||
|
SHA_MACOS_AARCH64
|
||||||
|
SHA_MACOS_X86_64
|
||||||
|
SHA_LINUX_AARCH64
|
||||||
|
SHA_LINUX_X86_64
|
||||||
|
|
||||||
|
Assumptions about the formula:
|
||||||
|
- Has `version "X.Y.Z"` somewhere
|
||||||
|
- Has `url "...releases/download/vX.Y.Z/numa-<target>.tar.gz"` lines
|
||||||
|
- May or may not already have `sha256 "..."` lines immediately after each url
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
formula_path = sys.argv[1]
|
||||||
|
version = os.environ["VERSION"].lstrip("v")
|
||||||
|
shas = {
|
||||||
|
"macos-aarch64": os.environ["SHA_MACOS_AARCH64"],
|
||||||
|
"macos-x86_64": os.environ["SHA_MACOS_X86_64"],
|
||||||
|
"linux-aarch64": os.environ["SHA_LINUX_AARCH64"],
|
||||||
|
"linux-x86_64": os.environ["SHA_LINUX_X86_64"],
|
||||||
|
}
|
||||||
|
|
||||||
|
with open(formula_path) as f:
|
||||||
|
content = f.read()
|
||||||
|
|
||||||
|
content = re.sub(r'version "[^"]*"', f'version "{version}"', content)
|
||||||
|
content = re.sub(
|
||||||
|
r"releases/download/v[\d.]+/numa-",
|
||||||
|
f"releases/download/v{version}/numa-",
|
||||||
|
content,
|
||||||
|
)
|
||||||
|
content = re.sub(r'\n[ \t]*sha256 "[^"]*"', "", content)
|
||||||
|
|
||||||
|
|
||||||
|
def add_sha(match: re.Match) -> str:
|
||||||
|
indent = match.group(1)
|
||||||
|
target = match.group(2)
|
||||||
|
if target not in shas:
|
||||||
|
return match.group(0)
|
||||||
|
return f'{match.group(0)}\n{indent}sha256 "{shas[target]}"'
|
||||||
|
|
||||||
|
|
||||||
|
content = re.sub(
|
||||||
|
r'^([ \t]+)url "[^"]*numa-([\w-]+)\.tar\.gz"',
|
||||||
|
add_sha,
|
||||||
|
content,
|
||||||
|
flags=re.MULTILINE,
|
||||||
|
)
|
||||||
|
|
||||||
|
with open(formula_path, "w") as f:
|
||||||
|
f.write(content)
|
||||||
@@ -906,7 +906,7 @@ async fn remove_route(
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn serve_ca(State(ctx): State<Arc<ServerCtx>>) -> Result<impl IntoResponse, StatusCode> {
|
async fn serve_ca(State(ctx): State<Arc<ServerCtx>>) -> Result<impl IntoResponse, StatusCode> {
|
||||||
let ca_path = ctx.data_dir.join("ca.pem");
|
let ca_path = ctx.data_dir.join(crate::tls::CA_FILE_NAME);
|
||||||
let bytes = tokio::task::spawn_blocking(move || std::fs::read(ca_path))
|
let bytes = tokio::task::spawn_blocking(move || std::fs::read(ca_path))
|
||||||
.await
|
.await
|
||||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
|
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::net::Ipv4Addr;
|
use std::net::Ipv4Addr;
|
||||||
use std::net::Ipv6Addr;
|
use std::net::Ipv6Addr;
|
||||||
use std::path::Path;
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
|
|
||||||
@@ -29,6 +29,8 @@ pub struct Config {
|
|||||||
pub lan: LanConfig,
|
pub lan: LanConfig,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub dnssec: DnssecConfig,
|
pub dnssec: DnssecConfig,
|
||||||
|
#[serde(default)]
|
||||||
|
pub dot: DotConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -39,6 +41,10 @@ pub struct ServerConfig {
|
|||||||
pub api_port: u16,
|
pub api_port: u16,
|
||||||
#[serde(default = "default_api_bind_addr")]
|
#[serde(default = "default_api_bind_addr")]
|
||||||
pub api_bind_addr: String,
|
pub api_bind_addr: String,
|
||||||
|
/// Where numa writes TLS material (CA, leaf certs, regenerated state).
|
||||||
|
/// Defaults to `crate::data_dir()` (platform-specific system path) if unset.
|
||||||
|
#[serde(default)]
|
||||||
|
pub data_dir: Option<PathBuf>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ServerConfig {
|
impl Default for ServerConfig {
|
||||||
@@ -47,6 +53,7 @@ impl Default for ServerConfig {
|
|||||||
bind_addr: default_bind_addr(),
|
bind_addr: default_bind_addr(),
|
||||||
api_port: default_api_port(),
|
api_port: default_api_port(),
|
||||||
api_bind_addr: default_api_bind_addr(),
|
api_bind_addr: default_api_bind_addr(),
|
||||||
|
data_dir: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -370,6 +377,41 @@ pub struct DnssecConfig {
|
|||||||
pub strict: bool,
|
pub strict: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Clone)]
|
||||||
|
pub struct DotConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default = "default_dot_port")]
|
||||||
|
pub port: u16,
|
||||||
|
#[serde(default = "default_dot_bind_addr")]
|
||||||
|
pub bind_addr: String,
|
||||||
|
/// Path to TLS certificate (PEM). If None, uses self-signed CA.
|
||||||
|
#[serde(default)]
|
||||||
|
pub cert_path: Option<PathBuf>,
|
||||||
|
/// Path to TLS private key (PEM). If None, uses self-signed CA.
|
||||||
|
#[serde(default)]
|
||||||
|
pub key_path: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for DotConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
DotConfig {
|
||||||
|
enabled: false,
|
||||||
|
port: default_dot_port(),
|
||||||
|
bind_addr: default_dot_bind_addr(),
|
||||||
|
cert_path: None,
|
||||||
|
key_path: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_dot_port() -> u16 {
|
||||||
|
853
|
||||||
|
}
|
||||||
|
fn default_dot_bind_addr() -> String {
|
||||||
|
"0.0.0.0".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|||||||
80
src/ctx.rs
80
src/ctx.rs
@@ -62,24 +62,21 @@ pub struct ServerCtx {
|
|||||||
pub dnssec_strict: bool,
|
pub dnssec_strict: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn handle_query(
|
/// Transport-agnostic DNS resolution. Runs the full pipeline (overrides, blocklist,
|
||||||
mut buffer: BytePacketBuffer,
|
/// cache, upstream, DNSSEC) and returns the serialized response in a buffer.
|
||||||
|
/// Callers use `.filled()` to get the response bytes without heap allocation.
|
||||||
|
/// Callers are responsible for parsing the incoming buffer into a `DnsPacket`
|
||||||
|
/// (and logging parse errors) before calling this function.
|
||||||
|
pub async fn resolve_query(
|
||||||
|
query: DnsPacket,
|
||||||
src_addr: SocketAddr,
|
src_addr: SocketAddr,
|
||||||
ctx: &ServerCtx,
|
ctx: &ServerCtx,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<BytePacketBuffer> {
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
|
|
||||||
let query = match DnsPacket::from_buffer(&mut buffer) {
|
|
||||||
Ok(packet) => packet,
|
|
||||||
Err(e) => {
|
|
||||||
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let (qname, qtype) = match query.questions.first() {
|
let (qname, qtype) = match query.questions.first() {
|
||||||
Some(q) => (q.name.clone(), q.qtype),
|
Some(q) => (q.name.clone(), q.qtype),
|
||||||
None => return Ok(()),
|
None => return Err("empty question section".into()),
|
||||||
};
|
};
|
||||||
|
|
||||||
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
|
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
|
||||||
@@ -162,6 +159,29 @@ pub async fn handle_query(
|
|||||||
resp.header.authed_data = true;
|
resp.header.authed_data = true;
|
||||||
}
|
}
|
||||||
(resp, QueryPath::Cached, cached_dnssec)
|
(resp, QueryPath::Cached, cached_dnssec)
|
||||||
|
} else if let Some(fwd_addr) =
|
||||||
|
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
|
||||||
|
{
|
||||||
|
// Conditional forwarding takes priority over recursive mode
|
||||||
|
// (e.g. Tailscale .ts.net, VPC private zones)
|
||||||
|
let upstream = Upstream::Udp(fwd_addr);
|
||||||
|
match forward_query(&query, &upstream, ctx.timeout).await {
|
||||||
|
Ok(resp) => {
|
||||||
|
ctx.cache.write().unwrap().insert(&qname, qtype, &resp);
|
||||||
|
(resp, QueryPath::Forwarded, DnssecStatus::Indeterminate)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"{} | {:?} {} | FORWARD ERROR | {}",
|
||||||
|
src_addr, qtype, qname, e
|
||||||
|
);
|
||||||
|
(
|
||||||
|
DnsPacket::response_from(&query, ResultCode::SERVFAIL),
|
||||||
|
QueryPath::UpstreamError,
|
||||||
|
DnssecStatus::Indeterminate,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
||||||
let key = (qname.clone(), qtype);
|
let key = (qname.clone(), qtype);
|
||||||
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
|
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
|
||||||
@@ -283,17 +303,17 @@ pub async fn handle_query(
|
|||||||
response.resources.len(),
|
response.resources.len(),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Serialize response
|
||||||
|
// TODO: TC bit is UDP-specific; DoT connections could carry up to 65535 bytes.
|
||||||
|
// Once BytePacketBuffer supports larger buffers, skip truncation for TCP/TLS.
|
||||||
let mut resp_buffer = BytePacketBuffer::new();
|
let mut resp_buffer = BytePacketBuffer::new();
|
||||||
if response.write(&mut resp_buffer).is_err() {
|
if response.write(&mut resp_buffer).is_err() {
|
||||||
// Response too large for UDP — set TC bit and send header + question only
|
// Response too large — set TC bit and send header + question only
|
||||||
debug!("response too large, setting TC bit for {}", qname);
|
debug!("response too large, setting TC bit for {}", qname);
|
||||||
let mut tc_response = DnsPacket::response_from(&query, response.header.rescode);
|
let mut tc_response = DnsPacket::response_from(&query, response.header.rescode);
|
||||||
tc_response.header.truncated_message = true;
|
tc_response.header.truncated_message = true;
|
||||||
let mut tc_buffer = BytePacketBuffer::new();
|
resp_buffer = BytePacketBuffer::new();
|
||||||
tc_response.write(&mut tc_buffer)?;
|
tc_response.write(&mut resp_buffer)?;
|
||||||
ctx.socket.send_to(tc_buffer.filled(), src_addr).await?;
|
|
||||||
} else {
|
|
||||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record stats and query log
|
// Record stats and query log
|
||||||
@@ -316,6 +336,30 @@ pub async fn handle_query(
|
|||||||
dnssec,
|
dnssec,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
Ok(resp_buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle a DNS query received over UDP. Thin wrapper around resolve_query.
|
||||||
|
pub async fn handle_query(
|
||||||
|
mut buffer: BytePacketBuffer,
|
||||||
|
src_addr: SocketAddr,
|
||||||
|
ctx: &ServerCtx,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||||
|
Ok(packet) => packet,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
match resolve_query(query, src_addr, ctx).await {
|
||||||
|
Ok(resp_buffer) => {
|
||||||
|
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("{} | RESOLVE ERROR | {}", src_addr, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
542
src/dot.rs
Normal file
542
src/dot.rs
Normal file
@@ -0,0 +1,542 @@
|
|||||||
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use log::{debug, error, info, warn};
|
||||||
|
use rustls::ServerConfig;
|
||||||
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::sync::Semaphore;
|
||||||
|
use tokio_rustls::TlsAcceptor;
|
||||||
|
|
||||||
|
use crate::buffer::BytePacketBuffer;
|
||||||
|
use crate::config::DotConfig;
|
||||||
|
use crate::ctx::{resolve_query, ServerCtx};
|
||||||
|
use crate::header::ResultCode;
|
||||||
|
use crate::packet::DnsPacket;
|
||||||
|
|
||||||
|
const MAX_CONNECTIONS: usize = 512;
|
||||||
|
const IDLE_TIMEOUT: Duration = Duration::from_secs(30);
|
||||||
|
const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10);
|
||||||
|
const WRITE_TIMEOUT: Duration = Duration::from_secs(10);
|
||||||
|
// Matches BytePacketBuffer::BUF_SIZE — RFC 7858 allows up to 65535 but our
|
||||||
|
// buffer would silently truncate anything larger.
|
||||||
|
const MAX_MSG_LEN: usize = 4096;
|
||||||
|
|
||||||
|
fn dot_alpn() -> Vec<Vec<u8>> {
|
||||||
|
vec![b"dot".to_vec()]
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a TLS ServerConfig for DoT from user-provided cert/key PEM files.
|
||||||
|
fn load_tls_config(cert_path: &Path, key_path: &Path) -> crate::Result<Arc<ServerConfig>> {
|
||||||
|
// rustls needs a CryptoProvider installed before ServerConfig::builder().
|
||||||
|
// The proxy's build_tls_config also does this; we repeat it here because
|
||||||
|
// running DoT with user-provided certs while the proxy is disabled would
|
||||||
|
// otherwise panic on first handshake (no default provider).
|
||||||
|
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||||
|
|
||||||
|
let cert_pem = std::fs::read(cert_path)?;
|
||||||
|
let key_pem = std::fs::read(key_path)?;
|
||||||
|
|
||||||
|
let certs: Vec<_> = rustls_pemfile::certs(&mut &cert_pem[..]).collect::<Result<_, _>>()?;
|
||||||
|
let key = rustls_pemfile::private_key(&mut &key_pem[..])?
|
||||||
|
.ok_or("no private key found in key file")?;
|
||||||
|
|
||||||
|
let mut config = ServerConfig::builder()
|
||||||
|
.with_no_client_auth()
|
||||||
|
.with_single_cert(certs, key)?;
|
||||||
|
config.alpn_protocols = dot_alpn();
|
||||||
|
|
||||||
|
Ok(Arc::new(config))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a self-signed DoT TLS config. Can't reuse `ctx.tls_config` (the
|
||||||
|
/// proxy's shared config) because DoT needs its own ALPN advertisement.
|
||||||
|
///
|
||||||
|
/// Pass `proxy_tld` itself as a service name so the cert gets an explicit
|
||||||
|
/// `{tld}.{tld}` SAN (e.g. "numa.numa") matching the ServerName that
|
||||||
|
/// setup-phone's mobileconfig sends as SNI. The `*.{tld}` wildcard alone
|
||||||
|
/// is rejected by strict TLS clients under single-label TLDs (per the
|
||||||
|
/// note in tls.rs::generate_service_cert).
|
||||||
|
fn self_signed_tls(ctx: &ServerCtx) -> Option<Arc<ServerConfig>> {
|
||||||
|
let service_names = [ctx.proxy_tld.clone()];
|
||||||
|
match crate::tls::build_tls_config(&ctx.proxy_tld, &service_names, dot_alpn(), &ctx.data_dir) {
|
||||||
|
Ok(cfg) => Some(cfg),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(
|
||||||
|
"DoT: failed to generate self-signed TLS: {} — DoT disabled",
|
||||||
|
e
|
||||||
|
);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Start the DNS-over-TLS listener (RFC 7858).
|
||||||
|
pub async fn start_dot(ctx: Arc<ServerCtx>, config: &DotConfig) {
|
||||||
|
let tls_config = match (&config.cert_path, &config.key_path) {
|
||||||
|
(Some(cert), Some(key)) => match load_tls_config(cert, key) {
|
||||||
|
Ok(cfg) => cfg,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("DoT: failed to load TLS cert/key: {} — DoT disabled", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => match self_signed_tls(&ctx) {
|
||||||
|
Some(cfg) => cfg,
|
||||||
|
None => return,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
let bind_addr: IpAddr = config
|
||||||
|
.bind_addr
|
||||||
|
.parse()
|
||||||
|
.unwrap_or(IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED));
|
||||||
|
let addr = SocketAddr::new(bind_addr, config.port);
|
||||||
|
let listener = match TcpListener::bind(addr).await {
|
||||||
|
Ok(l) => l,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("DoT: could not bind {} ({}) — DoT disabled", addr, e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
info!("DoT listening on {}", addr);
|
||||||
|
|
||||||
|
accept_loop(listener, TlsAcceptor::from(tls_config), ctx).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn accept_loop(listener: TcpListener, acceptor: TlsAcceptor, ctx: Arc<ServerCtx>) {
|
||||||
|
let semaphore = Arc::new(Semaphore::new(MAX_CONNECTIONS));
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let (tcp_stream, remote_addr) = match listener.accept().await {
|
||||||
|
Ok(conn) => conn,
|
||||||
|
Err(e) => {
|
||||||
|
error!("DoT: TCP accept error: {}", e);
|
||||||
|
// Back off to avoid tight-looping on persistent failures (e.g. fd exhaustion).
|
||||||
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let permit = match semaphore.clone().try_acquire_owned() {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(_) => {
|
||||||
|
debug!("DoT: connection limit reached, rejecting {}", remote_addr);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let acceptor = acceptor.clone();
|
||||||
|
let ctx = Arc::clone(&ctx);
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let _permit = permit; // held until task exits
|
||||||
|
|
||||||
|
let tls_stream =
|
||||||
|
match tokio::time::timeout(HANDSHAKE_TIMEOUT, acceptor.accept(tcp_stream)).await {
|
||||||
|
Ok(Ok(s)) => s,
|
||||||
|
Ok(Err(e)) => {
|
||||||
|
debug!("DoT: TLS handshake failed from {}: {}", remote_addr, e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
debug!("DoT: TLS handshake timeout from {}", remote_addr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
handle_dot_connection(tls_stream, remote_addr, &ctx).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle a single persistent DoT connection (RFC 7858).
|
||||||
|
/// Reads length-prefixed DNS queries until EOF, idle timeout, or error.
|
||||||
|
async fn handle_dot_connection<S>(mut stream: S, remote_addr: SocketAddr, ctx: &ServerCtx)
|
||||||
|
where
|
||||||
|
S: AsyncReadExt + AsyncWriteExt + Unpin,
|
||||||
|
{
|
||||||
|
loop {
|
||||||
|
// Read 2-byte length prefix (RFC 1035 §4.2.2) with idle timeout
|
||||||
|
let mut len_buf = [0u8; 2];
|
||||||
|
let Ok(Ok(_)) = tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut len_buf)).await
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
let msg_len = u16::from_be_bytes(len_buf) as usize;
|
||||||
|
if msg_len > MAX_MSG_LEN {
|
||||||
|
debug!("DoT: oversized message {} from {}", msg_len, remote_addr);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut buffer = BytePacketBuffer::new();
|
||||||
|
let Ok(Ok(_)) =
|
||||||
|
tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut buffer.buf[..msg_len])).await
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Parse query up-front so we can echo its question section in SERVFAIL
|
||||||
|
// responses when resolve_query fails.
|
||||||
|
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||||
|
Ok(q) => q,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("{} | PARSE ERROR | {}", remote_addr, e);
|
||||||
|
// BytePacketBuffer is zero-initialized, so buf[0..2] reads as 0x0000
|
||||||
|
// for sub-2-byte messages — harmless FORMERR with id=0.
|
||||||
|
let query_id = u16::from_be_bytes([buffer.buf[0], buffer.buf[1]]);
|
||||||
|
let mut resp = DnsPacket::new();
|
||||||
|
resp.header.id = query_id;
|
||||||
|
resp.header.response = true;
|
||||||
|
resp.header.rescode = ResultCode::FORMERR;
|
||||||
|
if send_response(&mut stream, &resp, remote_addr)
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match resolve_query(query.clone(), remote_addr, ctx).await {
|
||||||
|
Ok(resp_buffer) => {
|
||||||
|
if write_framed(&mut stream, resp_buffer.filled())
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("{} | RESOLVE ERROR | {}", remote_addr, e);
|
||||||
|
// SERVFAIL that echoes the original question section.
|
||||||
|
let resp = DnsPacket::response_from(&query, ResultCode::SERVFAIL);
|
||||||
|
if send_response(&mut stream, &resp, remote_addr)
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serialize a DNS response and send it framed. Logs serialization failures
|
||||||
|
/// and returns Err so the caller can tear down the connection.
|
||||||
|
async fn send_response<S>(
|
||||||
|
stream: &mut S,
|
||||||
|
resp: &DnsPacket,
|
||||||
|
remote_addr: SocketAddr,
|
||||||
|
) -> std::io::Result<()>
|
||||||
|
where
|
||||||
|
S: AsyncWriteExt + Unpin,
|
||||||
|
{
|
||||||
|
let mut out_buf = BytePacketBuffer::new();
|
||||||
|
if resp.write(&mut out_buf).is_err() {
|
||||||
|
debug!(
|
||||||
|
"DoT: failed to serialize {:?} response for {}",
|
||||||
|
resp.header.rescode, remote_addr
|
||||||
|
);
|
||||||
|
return Err(std::io::Error::other("serialize failed"));
|
||||||
|
}
|
||||||
|
write_framed(stream, out_buf.filled()).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write a DNS message with its 2-byte length prefix, coalesced into one syscall.
|
||||||
|
/// Bounded by WRITE_TIMEOUT so a stalled reader can't indefinitely hold a worker.
|
||||||
|
async fn write_framed<S>(stream: &mut S, msg: &[u8]) -> std::io::Result<()>
|
||||||
|
where
|
||||||
|
S: AsyncWriteExt + Unpin,
|
||||||
|
{
|
||||||
|
let mut out = Vec::with_capacity(2 + msg.len());
|
||||||
|
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||||
|
out.extend_from_slice(msg);
|
||||||
|
match tokio::time::timeout(WRITE_TIMEOUT, async {
|
||||||
|
stream.write_all(&out).await?;
|
||||||
|
stream.flush().await
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(result) => result,
|
||||||
|
Err(_) => Err(std::io::Error::other("write timeout")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::{Mutex, RwLock};
|
||||||
|
|
||||||
|
use rcgen::{CertificateParams, DnType, KeyPair};
|
||||||
|
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer, ServerName};
|
||||||
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
|
|
||||||
|
use crate::buffer::BytePacketBuffer;
|
||||||
|
use crate::header::ResultCode;
|
||||||
|
use crate::packet::DnsPacket;
|
||||||
|
use crate::question::QueryType;
|
||||||
|
use crate::record::DnsRecord;
|
||||||
|
|
||||||
|
/// Generate a self-signed DoT server config and return its leaf cert DER
|
||||||
|
/// so callers can build matching client configs with arbitrary ALPN.
|
||||||
|
fn test_tls_configs() -> (Arc<ServerConfig>, CertificateDer<'static>) {
|
||||||
|
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||||
|
|
||||||
|
// Mirror production self_signed_tls SAN shape: *.numa wildcard plus
|
||||||
|
// explicit numa.numa apex (the ServerName setup-phone uses as SNI).
|
||||||
|
let key_pair = KeyPair::generate().unwrap();
|
||||||
|
let mut params = CertificateParams::default();
|
||||||
|
params
|
||||||
|
.distinguished_name
|
||||||
|
.push(DnType::CommonName, "Numa .numa services");
|
||||||
|
params.subject_alt_names = vec![
|
||||||
|
rcgen::SanType::DnsName("*.numa".try_into().unwrap()),
|
||||||
|
rcgen::SanType::DnsName("numa.numa".try_into().unwrap()),
|
||||||
|
];
|
||||||
|
let cert = params.self_signed(&key_pair).unwrap();
|
||||||
|
|
||||||
|
let cert_der = CertificateDer::from(cert.der().to_vec());
|
||||||
|
let key_der = PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key_pair.serialize_der()));
|
||||||
|
|
||||||
|
let mut server_config = ServerConfig::builder()
|
||||||
|
.with_no_client_auth()
|
||||||
|
.with_single_cert(vec![cert_der.clone()], key_der)
|
||||||
|
.unwrap();
|
||||||
|
server_config.alpn_protocols = dot_alpn();
|
||||||
|
|
||||||
|
(Arc::new(server_config), cert_der)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a TLS client config that trusts `cert_der` and advertises the
|
||||||
|
/// given ALPN protocols. Used by tests to vary ALPN per test case.
|
||||||
|
fn dot_client(
|
||||||
|
cert_der: &CertificateDer<'static>,
|
||||||
|
alpn: Vec<Vec<u8>>,
|
||||||
|
) -> Arc<rustls::ClientConfig> {
|
||||||
|
let mut root_store = rustls::RootCertStore::empty();
|
||||||
|
root_store.add(cert_der.clone()).unwrap();
|
||||||
|
let mut config = rustls::ClientConfig::builder()
|
||||||
|
.with_root_certificates(root_store)
|
||||||
|
.with_no_client_auth();
|
||||||
|
config.alpn_protocols = alpn;
|
||||||
|
Arc::new(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spin up a DoT listener with a test TLS config. Returns the bind addr
|
||||||
|
/// and the leaf cert DER so callers can build clients with arbitrary ALPN.
|
||||||
|
/// The upstream is pointed at a bound-but-unresponsive UDP socket we own, so
|
||||||
|
/// any query that escapes to the upstream path times out deterministically
|
||||||
|
/// (SERVFAIL) regardless of what the host has running on port 53.
|
||||||
|
async fn spawn_dot_server() -> (SocketAddr, CertificateDer<'static>) {
|
||||||
|
let (server_tls, cert_der) = test_tls_configs();
|
||||||
|
|
||||||
|
let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
// Bind an unresponsive upstream and leak it so it lives for the test duration.
|
||||||
|
let blackhole = Box::leak(Box::new(std::net::UdpSocket::bind("127.0.0.1:0").unwrap()));
|
||||||
|
let upstream_addr = blackhole.local_addr().unwrap();
|
||||||
|
let ctx = Arc::new(ServerCtx {
|
||||||
|
socket,
|
||||||
|
zone_map: {
|
||||||
|
let mut m = HashMap::new();
|
||||||
|
let mut inner = HashMap::new();
|
||||||
|
inner.insert(
|
||||||
|
QueryType::A,
|
||||||
|
vec![DnsRecord::A {
|
||||||
|
domain: "dot-test.example".to_string(),
|
||||||
|
addr: std::net::Ipv4Addr::new(10, 0, 0, 1),
|
||||||
|
ttl: 300,
|
||||||
|
}],
|
||||||
|
);
|
||||||
|
m.insert("dot-test.example".to_string(), inner);
|
||||||
|
m
|
||||||
|
},
|
||||||
|
cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
|
||||||
|
stats: Mutex::new(crate::stats::ServerStats::new()),
|
||||||
|
overrides: RwLock::new(crate::override_store::OverrideStore::new()),
|
||||||
|
blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
|
||||||
|
query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
|
||||||
|
services: Mutex::new(crate::service_store::ServiceStore::new()),
|
||||||
|
lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
|
||||||
|
forwarding_rules: Vec::new(),
|
||||||
|
upstream: Mutex::new(crate::forward::Upstream::Udp(upstream_addr)),
|
||||||
|
upstream_auto: false,
|
||||||
|
upstream_port: 53,
|
||||||
|
lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
|
||||||
|
timeout: Duration::from_millis(200),
|
||||||
|
proxy_tld: "numa".to_string(),
|
||||||
|
proxy_tld_suffix: ".numa".to_string(),
|
||||||
|
lan_enabled: false,
|
||||||
|
config_path: String::new(),
|
||||||
|
config_found: false,
|
||||||
|
config_dir: std::path::PathBuf::from("/tmp"),
|
||||||
|
data_dir: std::path::PathBuf::from("/tmp"),
|
||||||
|
tls_config: Some(arc_swap::ArcSwap::from(server_tls)),
|
||||||
|
upstream_mode: crate::config::UpstreamMode::Forward,
|
||||||
|
root_hints: Vec::new(),
|
||||||
|
srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
|
||||||
|
inflight: Mutex::new(HashMap::new()),
|
||||||
|
dnssec_enabled: false,
|
||||||
|
dnssec_strict: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let tls_config = Arc::clone(&*ctx.tls_config.as_ref().unwrap().load());
|
||||||
|
let acceptor = TlsAcceptor::from(tls_config);
|
||||||
|
|
||||||
|
tokio::spawn(accept_loop(listener, acceptor, ctx));
|
||||||
|
|
||||||
|
(addr, cert_der)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Open a TLS connection to the DoT server and return the stream.
|
||||||
|
/// Uses SNI "numa.numa" to mirror what setup-phone's mobileconfig sends.
|
||||||
|
async fn dot_connect(
|
||||||
|
addr: SocketAddr,
|
||||||
|
client_config: &Arc<rustls::ClientConfig>,
|
||||||
|
) -> tokio_rustls::client::TlsStream<tokio::net::TcpStream> {
|
||||||
|
let connector = tokio_rustls::TlsConnector::from(Arc::clone(client_config));
|
||||||
|
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||||
|
connector
|
||||||
|
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a DNS query over a DoT stream and read the response.
|
||||||
|
async fn dot_exchange(
|
||||||
|
stream: &mut tokio_rustls::client::TlsStream<tokio::net::TcpStream>,
|
||||||
|
query: &DnsPacket,
|
||||||
|
) -> DnsPacket {
|
||||||
|
let mut buf = BytePacketBuffer::new();
|
||||||
|
query.write(&mut buf).unwrap();
|
||||||
|
let msg = buf.filled();
|
||||||
|
|
||||||
|
let mut out = Vec::with_capacity(2 + msg.len());
|
||||||
|
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||||
|
out.extend_from_slice(msg);
|
||||||
|
stream.write_all(&out).await.unwrap();
|
||||||
|
|
||||||
|
let mut len_buf = [0u8; 2];
|
||||||
|
stream.read_exact(&mut len_buf).await.unwrap();
|
||||||
|
let resp_len = u16::from_be_bytes(len_buf) as usize;
|
||||||
|
|
||||||
|
let mut data = vec![0u8; resp_len];
|
||||||
|
stream.read_exact(&mut data).await.unwrap();
|
||||||
|
|
||||||
|
let mut resp_buf = BytePacketBuffer::from_bytes(&data);
|
||||||
|
DnsPacket::from_buffer(&mut resp_buf).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_resolves_local_zone() {
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, dot_alpn());
|
||||||
|
let mut stream = dot_connect(addr, &client_config).await;
|
||||||
|
|
||||||
|
let query = DnsPacket::query(0x1234, "dot-test.example", QueryType::A);
|
||||||
|
let resp = dot_exchange(&mut stream, &query).await;
|
||||||
|
|
||||||
|
assert_eq!(resp.header.id, 0x1234);
|
||||||
|
assert!(resp.header.response);
|
||||||
|
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||||
|
assert_eq!(resp.answers.len(), 1);
|
||||||
|
match &resp.answers[0] {
|
||||||
|
DnsRecord::A { domain, addr, ttl } => {
|
||||||
|
assert_eq!(domain, "dot-test.example");
|
||||||
|
assert_eq!(*addr, std::net::Ipv4Addr::new(10, 0, 0, 1));
|
||||||
|
assert_eq!(*ttl, 300);
|
||||||
|
}
|
||||||
|
other => panic!("expected A record, got {:?}", other),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_multiple_queries_on_persistent_connection() {
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, dot_alpn());
|
||||||
|
let mut stream = dot_connect(addr, &client_config).await;
|
||||||
|
|
||||||
|
for i in 0..3u16 {
|
||||||
|
let query = DnsPacket::query(0xA000 + i, "dot-test.example", QueryType::A);
|
||||||
|
let resp = dot_exchange(&mut stream, &query).await;
|
||||||
|
assert_eq!(resp.header.id, 0xA000 + i);
|
||||||
|
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||||
|
assert_eq!(resp.answers.len(), 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_nxdomain_for_unknown() {
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, dot_alpn());
|
||||||
|
let mut stream = dot_connect(addr, &client_config).await;
|
||||||
|
|
||||||
|
let query = DnsPacket::query(0xBEEF, "nonexistent.test", QueryType::A);
|
||||||
|
let resp = dot_exchange(&mut stream, &query).await;
|
||||||
|
|
||||||
|
assert_eq!(resp.header.id, 0xBEEF);
|
||||||
|
assert!(resp.header.response);
|
||||||
|
// Query goes to the blackhole upstream which never replies → SERVFAIL.
|
||||||
|
// The SERVFAIL response echoes the question section.
|
||||||
|
assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
|
||||||
|
assert_eq!(resp.questions.len(), 1);
|
||||||
|
assert_eq!(resp.questions[0].name, "nonexistent.test");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_negotiates_alpn() {
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, dot_alpn());
|
||||||
|
let stream = dot_connect(addr, &client_config).await;
|
||||||
|
let (_io, conn) = stream.get_ref();
|
||||||
|
assert_eq!(conn.alpn_protocol(), Some(&b"dot"[..]));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_rejects_non_dot_alpn() {
|
||||||
|
// Cross-protocol confusion defense: a client that only offers "h2"
|
||||||
|
// (e.g. an HTTP/2 client mistakenly hitting :853) must not complete
|
||||||
|
// a TLS handshake with the DoT server. Verifies the rustls server
|
||||||
|
// sends `no_application_protocol` rather than silently negotiating.
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, vec![b"h2".to_vec()]);
|
||||||
|
let connector = tokio_rustls::TlsConnector::from(client_config);
|
||||||
|
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||||
|
let result = connector
|
||||||
|
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||||
|
.await;
|
||||||
|
assert!(
|
||||||
|
result.is_err(),
|
||||||
|
"DoT server must reject ALPN that doesn't include \"dot\""
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn dot_concurrent_connections() {
|
||||||
|
let (addr, cert_der) = spawn_dot_server().await;
|
||||||
|
let client_config = dot_client(&cert_der, dot_alpn());
|
||||||
|
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
for i in 0..5u16 {
|
||||||
|
let cfg = Arc::clone(&client_config);
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
let mut stream = dot_connect(addr, &cfg).await;
|
||||||
|
let query = DnsPacket::query(0xC000 + i, "dot-test.example", QueryType::A);
|
||||||
|
let resp = dot_exchange(&mut stream, &query).await;
|
||||||
|
assert_eq!(resp.header.id, 0xC000 + i);
|
||||||
|
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||||
|
assert_eq!(resp.answers.len(), 1);
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for h in handles {
|
||||||
|
h.await.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
72
src/lib.rs
72
src/lib.rs
@@ -5,6 +5,7 @@ pub mod cache;
|
|||||||
pub mod config;
|
pub mod config;
|
||||||
pub mod ctx;
|
pub mod ctx;
|
||||||
pub mod dnssec;
|
pub mod dnssec;
|
||||||
|
pub mod dot;
|
||||||
pub mod forward;
|
pub mod forward;
|
||||||
pub mod header;
|
pub mod header;
|
||||||
pub mod lan;
|
pub mod lan;
|
||||||
@@ -25,7 +26,10 @@ pub type Error = Box<dyn std::error::Error + Send + Sync>;
|
|||||||
pub type Result<T> = std::result::Result<T, Error>;
|
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
|
|
||||||
/// Shared config directory for persistent data (services.json, etc).
|
/// Shared config directory for persistent data (services.json, etc).
|
||||||
/// Unix: ~/.config/numa/ (or /usr/local/var/numa/ when running as root daemon)
|
/// Unix users: ~/.config/numa/
|
||||||
|
/// Linux root daemon: /var/lib/numa (FHS) — falls back to /usr/local/var/numa
|
||||||
|
/// if a pre-v0.10.1 install already lives there.
|
||||||
|
/// macOS root daemon: /usr/local/var/numa (Homebrew prefix)
|
||||||
/// Windows: %APPDATA%\numa
|
/// Windows: %APPDATA%\numa
|
||||||
pub fn config_dir() -> std::path::PathBuf {
|
pub fn config_dir() -> std::path::PathBuf {
|
||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
@@ -62,11 +66,15 @@ fn config_dir_unix() -> std::path::PathBuf {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Running as root daemon (launchd/systemd) — use system-wide path
|
// Running as root daemon (launchd/systemd) — use system-wide path
|
||||||
std::path::PathBuf::from("/usr/local/var/numa")
|
daemon_data_dir()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// System-wide data directory for TLS certs.
|
/// Default system-wide data directory for TLS certs. Overridable via
|
||||||
/// Unix: /usr/local/var/numa
|
/// `[server] data_dir = "..."` in numa.toml — this function only provides
|
||||||
|
/// the fallback when the config doesn't set it.
|
||||||
|
/// Linux: /var/lib/numa (FHS) — falls back to /usr/local/var/numa if a
|
||||||
|
/// pre-v0.10.1 install already has data there.
|
||||||
|
/// macOS: /usr/local/var/numa (Homebrew prefix)
|
||||||
/// Windows: %PROGRAMDATA%\numa
|
/// Windows: %PROGRAMDATA%\numa
|
||||||
pub fn data_dir() -> std::path::PathBuf {
|
pub fn data_dir() -> std::path::PathBuf {
|
||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
@@ -78,6 +86,62 @@ pub fn data_dir() -> std::path::PathBuf {
|
|||||||
}
|
}
|
||||||
#[cfg(not(windows))]
|
#[cfg(not(windows))]
|
||||||
{
|
{
|
||||||
|
daemon_data_dir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve the system-wide data directory for the running platform.
|
||||||
|
/// Honors backwards compatibility with pre-v0.10.1 installs that still
|
||||||
|
/// have their CA cert + services.json under `/usr/local/var/numa`.
|
||||||
|
#[cfg(not(windows))]
|
||||||
|
fn daemon_data_dir() -> std::path::PathBuf {
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
{
|
||||||
|
std::path::PathBuf::from(resolve_linux_data_dir(
|
||||||
|
std::path::Path::new("/usr/local/var/numa").exists(),
|
||||||
|
std::path::Path::new("/var/lib/numa").exists(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
{
|
||||||
|
// macOS uses the Homebrew prefix convention; no FHS migration needed.
|
||||||
std::path::PathBuf::from("/usr/local/var/numa")
|
std::path::PathBuf::from("/usr/local/var/numa")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Extracted as a pure function so the migration logic is unit-testable
|
||||||
|
/// without touching the real filesystem.
|
||||||
|
#[cfg(any(target_os = "linux", test))]
|
||||||
|
fn resolve_linux_data_dir(legacy_exists: bool, fhs_exists: bool) -> &'static str {
|
||||||
|
if legacy_exists && !fhs_exists {
|
||||||
|
"/usr/local/var/numa"
|
||||||
|
} else {
|
||||||
|
"/var/lib/numa"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn linux_data_dir_fresh_install_uses_fhs() {
|
||||||
|
assert_eq!(resolve_linux_data_dir(false, false), "/var/lib/numa");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn linux_data_dir_upgrading_install_keeps_legacy() {
|
||||||
|
// Migration must keep legacy so the user doesn't lose their CA on upgrade.
|
||||||
|
assert_eq!(resolve_linux_data_dir(true, false), "/usr/local/var/numa");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn linux_data_dir_after_migration_uses_fhs() {
|
||||||
|
assert_eq!(resolve_linux_data_dir(true, true), "/var/lib/numa");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn linux_data_dir_only_fhs_uses_fhs() {
|
||||||
|
assert_eq!(resolve_linux_data_dir(false, true), "/var/lib/numa");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
68
src/main.rs
68
src/main.rs
@@ -20,6 +20,9 @@ use numa::system_dns::{
|
|||||||
discover_system_dns, install_service, restart_service, service_status, uninstall_service,
|
discover_system_dns, install_service, restart_service, service_status, uninstall_service,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const QUAD9_IP: &str = "9.9.9.9";
|
||||||
|
const DOH_FALLBACK: &str = "https://9.9.9.9/dns-query";
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> numa::Result<()> {
|
async fn main() -> numa::Result<()> {
|
||||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
|
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
|
||||||
@@ -126,7 +129,7 @@ async fn main() -> numa::Result<()> {
|
|||||||
.use_rustls_tls()
|
.use_rustls_tls()
|
||||||
.build()
|
.build()
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
let url = "https://dns.quad9.net/dns-query".to_string();
|
let url = DOH_FALLBACK.to_string();
|
||||||
let label = url.clone();
|
let label = url.clone();
|
||||||
(
|
(
|
||||||
numa::config::UpstreamMode::Forward,
|
numa::config::UpstreamMode::Forward,
|
||||||
@@ -152,7 +155,7 @@ async fn main() -> numa::Result<()> {
|
|||||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||||
.unwrap_or_else(|| {
|
.unwrap_or_else(|| {
|
||||||
info!("could not detect system DNS, falling back to Quad9 DoH");
|
info!("could not detect system DNS, falling back to Quad9 DoH");
|
||||||
"https://dns.quad9.net/dns-query".to_string()
|
DOH_FALLBACK.to_string()
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
config.upstream.address.clone()
|
config.upstream.address.clone()
|
||||||
@@ -201,13 +204,30 @@ async fn main() -> numa::Result<()> {
|
|||||||
|
|
||||||
let forwarding_rules = system_dns.forwarding_rules;
|
let forwarding_rules = system_dns.forwarding_rules;
|
||||||
|
|
||||||
|
// Resolve data_dir from config, falling back to the platform default.
|
||||||
|
// Used for TLS CA storage below and stored on ServerCtx for runtime use.
|
||||||
|
let resolved_data_dir = config
|
||||||
|
.server
|
||||||
|
.data_dir
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(numa::data_dir);
|
||||||
|
|
||||||
// Build initial TLS config before ServerCtx (so ArcSwap is ready at construction)
|
// Build initial TLS config before ServerCtx (so ArcSwap is ready at construction)
|
||||||
let initial_tls = if config.proxy.enabled && config.proxy.tls_port > 0 {
|
let initial_tls = if config.proxy.enabled && config.proxy.tls_port > 0 {
|
||||||
let service_names = service_store.names();
|
let service_names = service_store.names();
|
||||||
match numa::tls::build_tls_config(&config.proxy.tld, &service_names) {
|
match numa::tls::build_tls_config(
|
||||||
|
&config.proxy.tld,
|
||||||
|
&service_names,
|
||||||
|
Vec::new(),
|
||||||
|
&resolved_data_dir,
|
||||||
|
) {
|
||||||
Ok(tls_config) => Some(ArcSwap::from(tls_config)),
|
Ok(tls_config) => Some(ArcSwap::from(tls_config)),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
log::warn!("TLS setup failed, HTTPS proxy disabled: {}", e);
|
if let Some(advisory) = numa::tls::try_data_dir_advisory(&e, &resolved_data_dir) {
|
||||||
|
eprint!("{}", advisory);
|
||||||
|
} else {
|
||||||
|
log::warn!("TLS setup failed, HTTPS proxy disabled: {}", e);
|
||||||
|
}
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -215,8 +235,21 @@ async fn main() -> numa::Result<()> {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let socket = match UdpSocket::bind(&config.server.bind_addr).await {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
if let Some(advisory) =
|
||||||
|
numa::system_dns::try_port53_advisory(&config.server.bind_addr, &e)
|
||||||
|
{
|
||||||
|
eprint!("{}", advisory);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
return Err(e.into());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let ctx = Arc::new(ServerCtx {
|
let ctx = Arc::new(ServerCtx {
|
||||||
socket: UdpSocket::bind(&config.server.bind_addr).await?,
|
socket,
|
||||||
zone_map: build_zone_map(&config.zones)?,
|
zone_map: build_zone_map(&config.zones)?,
|
||||||
cache: RwLock::new(DnsCache::new(
|
cache: RwLock::new(DnsCache::new(
|
||||||
config.cache.max_entries,
|
config.cache.max_entries,
|
||||||
@@ -245,7 +278,7 @@ async fn main() -> numa::Result<()> {
|
|||||||
config_path: resolved_config_path,
|
config_path: resolved_config_path,
|
||||||
config_found,
|
config_found,
|
||||||
config_dir: numa::config_dir(),
|
config_dir: numa::config_dir(),
|
||||||
data_dir: numa::data_dir(),
|
data_dir: resolved_data_dir,
|
||||||
tls_config: initial_tls,
|
tls_config: initial_tls,
|
||||||
upstream_mode: resolved_mode,
|
upstream_mode: resolved_mode,
|
||||||
root_hints,
|
root_hints,
|
||||||
@@ -367,6 +400,9 @@ async fn main() -> numa::Result<()> {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if config.dot.enabled {
|
||||||
|
row("DoT", g, &format!("tls://:{}", config.dot.port));
|
||||||
|
}
|
||||||
if config.lan.enabled {
|
if config.lan.enabled {
|
||||||
row("LAN", g, "mDNS (_numa._tcp.local)");
|
row("LAN", g, "mDNS (_numa._tcp.local)");
|
||||||
}
|
}
|
||||||
@@ -474,11 +510,27 @@ async fn main() -> numa::Result<()> {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Spawn DNS-over-TLS listener (RFC 7858)
|
||||||
|
if config.dot.enabled {
|
||||||
|
let dot_ctx = Arc::clone(&ctx);
|
||||||
|
let dot_config = config.dot.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
numa::dot::start_dot(dot_ctx, &dot_config).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
// UDP DNS listener
|
// UDP DNS listener
|
||||||
#[allow(clippy::infinite_loop)]
|
#[allow(clippy::infinite_loop)]
|
||||||
loop {
|
loop {
|
||||||
let mut buffer = BytePacketBuffer::new();
|
let mut buffer = BytePacketBuffer::new();
|
||||||
let (_, src_addr) = ctx.socket.recv_from(&mut buffer.buf).await?;
|
let (_, src_addr) = match ctx.socket.recv_from(&mut buffer.buf).await {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) if e.kind() == std::io::ErrorKind::ConnectionReset => {
|
||||||
|
// Windows delivers ICMP port-unreachable as ConnectionReset on UDP sockets
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
Err(e) => return Err(e.into()),
|
||||||
|
};
|
||||||
|
|
||||||
let ctx = Arc::clone(&ctx);
|
let ctx = Arc::clone(&ctx);
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
@@ -521,7 +573,7 @@ async fn network_watch_loop(ctx: Arc<numa::ctx::ServerCtx>) {
|
|||||||
let new_addr = dns_info
|
let new_addr = dns_info
|
||||||
.default_upstream
|
.default_upstream
|
||||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||||
.unwrap_or_else(|| "9.9.9.9".to_string());
|
.unwrap_or_else(|| QUAD9_IP.to_string());
|
||||||
if let Ok(new_sock) =
|
if let Ok(new_sock) =
|
||||||
format!("{}:{}", new_addr, ctx.upstream_port).parse::<SocketAddr>()
|
format!("{}:{}", new_addr, ctx.upstream_port).parse::<SocketAddr>()
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -870,14 +870,25 @@ mod tests {
|
|||||||
};
|
};
|
||||||
let handler = handler.clone();
|
let handler = handler.clone();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
|
let timeout = std::time::Duration::from_secs(5);
|
||||||
// Read length-prefixed DNS query
|
// Read length-prefixed DNS query
|
||||||
let mut len_buf = [0u8; 2];
|
let mut len_buf = [0u8; 2];
|
||||||
if stream.read_exact(&mut len_buf).await.is_err() {
|
if tokio::time::timeout(timeout, stream.read_exact(&mut len_buf))
|
||||||
|
.await
|
||||||
|
.ok()
|
||||||
|
.and_then(|r| r.ok())
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let len = u16::from_be_bytes(len_buf) as usize;
|
let len = u16::from_be_bytes(len_buf) as usize;
|
||||||
let mut data = vec![0u8; len];
|
let mut data = vec![0u8; len];
|
||||||
if stream.read_exact(&mut data).await.is_err() {
|
if tokio::time::timeout(timeout, stream.read_exact(&mut data))
|
||||||
|
.await
|
||||||
|
.ok()
|
||||||
|
.and_then(|r| r.ok())
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
101
src/srtt.rs
101
src/srtt.rs
@@ -47,16 +47,19 @@ impl SrttCache {
|
|||||||
|
|
||||||
/// Apply time-based decay: each DECAY_AFTER_SECS period halves distance to INITIAL.
|
/// Apply time-based decay: each DECAY_AFTER_SECS period halves distance to INITIAL.
|
||||||
fn decayed_srtt(entry: &SrttEntry) -> u64 {
|
fn decayed_srtt(entry: &SrttEntry) -> u64 {
|
||||||
let age_secs = entry.updated_at.elapsed().as_secs();
|
Self::decay_for_age(entry.srtt_ms, entry.updated_at.elapsed().as_secs())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn decay_for_age(srtt_ms: u64, age_secs: u64) -> u64 {
|
||||||
if age_secs > DECAY_AFTER_SECS {
|
if age_secs > DECAY_AFTER_SECS {
|
||||||
let periods = (age_secs / DECAY_AFTER_SECS).min(8);
|
let periods = (age_secs / DECAY_AFTER_SECS).min(8);
|
||||||
let mut srtt = entry.srtt_ms;
|
let mut srtt = srtt_ms;
|
||||||
for _ in 0..periods {
|
for _ in 0..periods {
|
||||||
srtt = (srtt + INITIAL_SRTT_MS) / 2;
|
srtt = (srtt + INITIAL_SRTT_MS) / 2;
|
||||||
}
|
}
|
||||||
srtt
|
srtt
|
||||||
} else {
|
} else {
|
||||||
entry.srtt_ms
|
srtt_ms
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -116,13 +119,6 @@ impl SrttCache {
|
|||||||
self.entries.is_empty()
|
self.entries.is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn set_updated_at(&mut self, ip: IpAddr, at: Instant) {
|
|
||||||
if let Some(entry) = self.entries.get_mut(&ip) {
|
|
||||||
entry.updated_at = at;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn maybe_evict(&mut self) {
|
fn maybe_evict(&mut self) {
|
||||||
if self.entries.len() < MAX_ENTRIES {
|
if self.entries.len() < MAX_ENTRIES {
|
||||||
return;
|
return;
|
||||||
@@ -218,63 +214,41 @@ mod tests {
|
|||||||
assert_eq!(addrs, original);
|
assert_eq!(addrs, original);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn age(secs: u64) -> Instant {
|
|
||||||
Instant::now() - std::time::Duration::from_secs(secs)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Cache with ip(1) saturated at FAILURE_PENALTY_MS
|
|
||||||
fn saturated_penalty_cache() -> SrttCache {
|
|
||||||
let mut cache = SrttCache::new(true);
|
|
||||||
for _ in 0..30 {
|
|
||||||
cache.record_rtt(ip(1), FAILURE_PENALTY_MS, false);
|
|
||||||
}
|
|
||||||
cache
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn no_decay_within_threshold() {
|
fn no_decay_within_threshold() {
|
||||||
let mut cache = SrttCache::new(true);
|
// At exactly DECAY_AFTER_SECS, no decay applied
|
||||||
cache.record_rtt(ip(1), 5000, false);
|
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS);
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS));
|
assert_eq!(result, FAILURE_PENALTY_MS);
|
||||||
assert_eq!(cache.get(ip(1)), cache.entries[&ip(1)].srtt_ms);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn one_decay_period() {
|
fn one_decay_period() {
|
||||||
let mut cache = saturated_penalty_cache();
|
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS + 1);
|
||||||
let raw = cache.entries[&ip(1)].srtt_ms;
|
let expected = (FAILURE_PENALTY_MS + INITIAL_SRTT_MS) / 2;
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS + 1));
|
assert_eq!(result, expected);
|
||||||
let expected = (raw + INITIAL_SRTT_MS) / 2;
|
|
||||||
assert_eq!(cache.get(ip(1)), expected);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn multiple_decay_periods() {
|
fn multiple_decay_periods() {
|
||||||
let mut cache = saturated_penalty_cache();
|
let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 4 + 1);
|
||||||
let raw = cache.entries[&ip(1)].srtt_ms;
|
let mut expected = FAILURE_PENALTY_MS;
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 4 + 1));
|
|
||||||
let mut expected = raw;
|
|
||||||
for _ in 0..4 {
|
for _ in 0..4 {
|
||||||
expected = (expected + INITIAL_SRTT_MS) / 2;
|
expected = (expected + INITIAL_SRTT_MS) / 2;
|
||||||
}
|
}
|
||||||
assert_eq!(cache.get(ip(1)), expected);
|
assert_eq!(result, expected);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn decay_caps_at_8_periods() {
|
fn decay_caps_at_8_periods() {
|
||||||
// 9 periods and 100 periods should produce the same result (capped at 8)
|
// 9 periods and 100 periods should produce the same result (capped at 8)
|
||||||
let mut cache_a = saturated_penalty_cache();
|
let a = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 9 + 1);
|
||||||
let mut cache_b = saturated_penalty_cache();
|
let b = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||||
cache_a.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 9 + 1));
|
assert_eq!(a, b);
|
||||||
cache_b.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 100));
|
|
||||||
assert_eq!(cache_a.get(ip(1)), cache_b.get(ip(1)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn decay_converges_toward_initial() {
|
fn decay_converges_toward_initial() {
|
||||||
let mut cache = saturated_penalty_cache();
|
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 100));
|
|
||||||
let decayed = cache.get(ip(1));
|
|
||||||
let diff = decayed.abs_diff(INITIAL_SRTT_MS);
|
let diff = decayed.abs_diff(INITIAL_SRTT_MS);
|
||||||
assert!(
|
assert!(
|
||||||
diff < 25,
|
diff < 25,
|
||||||
@@ -286,29 +260,28 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn record_rtt_applies_decay_before_ewma() {
|
fn record_rtt_applies_decay_before_ewma() {
|
||||||
let mut cache = saturated_penalty_cache();
|
// Verify decay is applied before EWMA in record_rtt by checking
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 8));
|
// that a saturated penalty + long age + new sample produces a low SRTT
|
||||||
cache.record_rtt(ip(1), 50, false);
|
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 8);
|
||||||
let srtt = cache.get(ip(1));
|
// EWMA: (decayed * 7 + 50) / 8
|
||||||
// Without decay-before-EWMA, result would be ~(5000*7+50)/8 ≈ 4381
|
let after_ewma = (decayed * 7 + 50) / 8;
|
||||||
assert!(srtt < 500, "expected decay before EWMA, got srtt={}", srtt);
|
assert!(
|
||||||
|
after_ewma < 500,
|
||||||
|
"expected decay before EWMA, got srtt={}",
|
||||||
|
after_ewma
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn decay_reranks_stale_failures() {
|
fn decay_reranks_stale_failures() {
|
||||||
let mut cache = saturated_penalty_cache();
|
// After enough decay, a failed server (5000ms) converges toward
|
||||||
for _ in 0..30 {
|
// INITIAL (200ms), which is below a stable server at 300ms
|
||||||
cache.record_rtt(ip(2), 300, false);
|
let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
|
||||||
}
|
assert!(
|
||||||
let mut addrs = vec![sock(1), sock(2)];
|
decayed < 300,
|
||||||
cache.sort_by_rtt(&mut addrs);
|
"expected decayed penalty ({}) < 300ms",
|
||||||
assert_eq!(addrs, vec![sock(2), sock(1)]);
|
decayed
|
||||||
|
);
|
||||||
// Age server 1 so it decays toward INITIAL (200ms) — below server 2's 300ms
|
|
||||||
cache.set_updated_at(ip(1), age(DECAY_AFTER_SECS * 100));
|
|
||||||
let mut addrs = vec![sock(1), sock(2)];
|
|
||||||
cache.sort_by_rtt(&mut addrs);
|
|
||||||
assert_eq!(addrs, vec![sock(1), sock(2)]);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
1127
src/system_dns.rs
1127
src/system_dns.rs
File diff suppressed because it is too large
Load Diff
94
src/tls.rs
94
src/tls.rs
@@ -13,6 +13,13 @@ use time::{Duration, OffsetDateTime};
|
|||||||
const CA_VALIDITY_DAYS: i64 = 3650; // 10 years
|
const CA_VALIDITY_DAYS: i64 = 3650; // 10 years
|
||||||
const CERT_VALIDITY_DAYS: i64 = 365; // 1 year
|
const CERT_VALIDITY_DAYS: i64 = 365; // 1 year
|
||||||
|
|
||||||
|
/// Common Name on Numa's local CA. Referenced by trust-store helpers
|
||||||
|
/// (`security`, `certutil`) when locating the cert for removal.
|
||||||
|
pub const CA_COMMON_NAME: &str = "Numa Local CA";
|
||||||
|
|
||||||
|
/// Filename of the CA certificate inside the data dir.
|
||||||
|
pub const CA_FILE_NAME: &str = "ca.pem";
|
||||||
|
|
||||||
/// Collect all service + LAN peer names and regenerate the TLS cert.
|
/// Collect all service + LAN peer names and regenerate the TLS cert.
|
||||||
pub fn regenerate_tls(ctx: &ServerCtx) {
|
pub fn regenerate_tls(ctx: &ServerCtx) {
|
||||||
let tls = match &ctx.tls_config {
|
let tls = match &ctx.tls_config {
|
||||||
@@ -24,7 +31,7 @@ pub fn regenerate_tls(ctx: &ServerCtx) {
|
|||||||
names.extend(ctx.lan_peers.lock().unwrap().names());
|
names.extend(ctx.lan_peers.lock().unwrap().names());
|
||||||
let names: Vec<String> = names.into_iter().collect();
|
let names: Vec<String> = names.into_iter().collect();
|
||||||
|
|
||||||
match build_tls_config(&ctx.proxy_tld, &names) {
|
match build_tls_config(&ctx.proxy_tld, &names, Vec::new(), &ctx.data_dir) {
|
||||||
Ok(new_config) => {
|
Ok(new_config) => {
|
||||||
tls.store(new_config);
|
tls.store(new_config);
|
||||||
info!("TLS cert regenerated for {} services", names.len());
|
info!("TLS cert regenerated for {} services", names.len());
|
||||||
@@ -33,20 +40,63 @@ pub fn regenerate_tls(ctx: &ServerCtx) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Advisory for TLS-setup failures caused by a non-writable data dir;
|
||||||
|
/// `None` if not applicable so the caller can fall back to the raw error.
|
||||||
|
pub fn try_data_dir_advisory(err: &crate::Error, data_dir: &Path) -> Option<String> {
|
||||||
|
let io_err = err.downcast_ref::<std::io::Error>()?;
|
||||||
|
if io_err.kind() != std::io::ErrorKind::PermissionDenied {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let o = "\x1b[1;38;2;192;98;58m";
|
||||||
|
let r = "\x1b[0m";
|
||||||
|
Some(format!(
|
||||||
|
"
|
||||||
|
{o}Numa{r} — HTTPS proxy disabled: cannot write TLS CA to {}.
|
||||||
|
|
||||||
|
The data directory is not writable by the current user. Numa needs
|
||||||
|
to persist a local Certificate Authority there to serve .numa over
|
||||||
|
HTTPS. DNS resolution and plain-HTTP proxy continue to work.
|
||||||
|
|
||||||
|
Fix — pick one:
|
||||||
|
|
||||||
|
1. Install Numa as the system resolver (sets up a writable data dir):
|
||||||
|
|
||||||
|
sudo numa install (on Windows, run as Administrator)
|
||||||
|
|
||||||
|
2. Point data_dir at a path you can write.
|
||||||
|
Create ~/.config/numa/numa.toml with:
|
||||||
|
|
||||||
|
[server]
|
||||||
|
data_dir = \"/path/you/can/write\"
|
||||||
|
|
||||||
|
",
|
||||||
|
data_dir.display()
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
/// Build a TLS config with a cert covering all provided service names.
|
/// Build a TLS config with a cert covering all provided service names.
|
||||||
/// Wildcards under single-label TLDs (*.numa) are rejected by browsers,
|
/// Wildcards under single-label TLDs (*.numa) are rejected by browsers,
|
||||||
/// so we list each service explicitly as a SAN.
|
/// so we list each service explicitly as a SAN.
|
||||||
pub fn build_tls_config(tld: &str, service_names: &[String]) -> crate::Result<Arc<ServerConfig>> {
|
/// `alpn` is advertised in the TLS ServerHello — pass empty for the proxy
|
||||||
let dir = crate::data_dir();
|
/// (which accepts any ALPN), or `[b"dot"]` for DoT (RFC 7858 §3.2).
|
||||||
let (ca_cert, ca_key) = ensure_ca(&dir)?;
|
/// `data_dir` is where the CA material is stored — taken from
|
||||||
|
/// `[server] data_dir` in numa.toml (defaults to `crate::data_dir()`).
|
||||||
|
pub fn build_tls_config(
|
||||||
|
tld: &str,
|
||||||
|
service_names: &[String],
|
||||||
|
alpn: Vec<Vec<u8>>,
|
||||||
|
data_dir: &Path,
|
||||||
|
) -> crate::Result<Arc<ServerConfig>> {
|
||||||
|
let (ca_cert, ca_key) = ensure_ca(data_dir)?;
|
||||||
let (cert_chain, key) = generate_service_cert(&ca_cert, &ca_key, tld, service_names)?;
|
let (cert_chain, key) = generate_service_cert(&ca_cert, &ca_key, tld, service_names)?;
|
||||||
|
|
||||||
// Ensure a crypto provider is installed (rustls needs one)
|
// Ensure a crypto provider is installed (rustls needs one)
|
||||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||||
|
|
||||||
let config = ServerConfig::builder()
|
let mut config = ServerConfig::builder()
|
||||||
.with_no_client_auth()
|
.with_no_client_auth()
|
||||||
.with_single_cert(cert_chain, key)?;
|
.with_single_cert(cert_chain, key)?;
|
||||||
|
config.alpn_protocols = alpn;
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"TLS configured for {} .{} domains",
|
"TLS configured for {} .{} domains",
|
||||||
@@ -58,7 +108,7 @@ pub fn build_tls_config(tld: &str, service_names: &[String]) -> crate::Result<Ar
|
|||||||
|
|
||||||
fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||||
let ca_key_path = dir.join("ca.key");
|
let ca_key_path = dir.join("ca.key");
|
||||||
let ca_cert_path = dir.join("ca.pem");
|
let ca_cert_path = dir.join(CA_FILE_NAME);
|
||||||
|
|
||||||
if ca_key_path.exists() && ca_cert_path.exists() {
|
if ca_key_path.exists() && ca_cert_path.exists() {
|
||||||
let key_pem = std::fs::read_to_string(&ca_key_path)?;
|
let key_pem = std::fs::read_to_string(&ca_key_path)?;
|
||||||
@@ -77,7 +127,7 @@ fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
|||||||
let mut params = CertificateParams::default();
|
let mut params = CertificateParams::default();
|
||||||
params
|
params
|
||||||
.distinguished_name
|
.distinguished_name
|
||||||
.push(DnType::CommonName, "Numa Local CA");
|
.push(DnType::CommonName, CA_COMMON_NAME);
|
||||||
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
||||||
params.key_usages = vec![KeyUsagePurpose::KeyCertSign, KeyUsagePurpose::CrlSign];
|
params.key_usages = vec![KeyUsagePurpose::KeyCertSign, KeyUsagePurpose::CrlSign];
|
||||||
params.not_before = OffsetDateTime::now_utc();
|
params.not_before = OffsetDateTime::now_utc();
|
||||||
@@ -154,3 +204,33 @@ fn generate_service_cert(
|
|||||||
|
|
||||||
Ok((vec![cert_der, ca_der], key_der))
|
Ok((vec![cert_der, ca_der], key_der))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn try_data_dir_advisory_permission_denied() {
|
||||||
|
let err: crate::Error =
|
||||||
|
Box::new(std::io::Error::from(std::io::ErrorKind::PermissionDenied));
|
||||||
|
let path = PathBuf::from("/usr/local/var/numa");
|
||||||
|
let msg = try_data_dir_advisory(&err, &path).expect("should advise");
|
||||||
|
assert!(msg.contains("HTTPS proxy disabled"));
|
||||||
|
assert!(msg.contains("/usr/local/var/numa"));
|
||||||
|
assert!(msg.contains("numa install"));
|
||||||
|
assert!(msg.contains("data_dir"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn try_data_dir_advisory_skips_other_io_kinds() {
|
||||||
|
let err: crate::Error = Box::new(std::io::Error::from(std::io::ErrorKind::NotFound));
|
||||||
|
assert!(try_data_dir_advisory(&err, &PathBuf::from("/x")).is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn try_data_dir_advisory_skips_non_io_errors() {
|
||||||
|
let err: crate::Error = "rcgen failure".into();
|
||||||
|
assert!(try_data_dir_advisory(&err, &PathBuf::from("/x")).is_none());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
123
tests/docker/install-trust.sh
Executable file
123
tests/docker/install-trust.sh
Executable file
@@ -0,0 +1,123 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
#
|
||||||
|
# Cross-distro CA trust contract test for issue #35.
|
||||||
|
#
|
||||||
|
# Runs the exact shell commands `src/system_dns.rs::trust_ca_linux` would run
|
||||||
|
# on each Linux trust-store family (Debian, Fedora pki, Arch p11-kit), and
|
||||||
|
# asserts the certificate ends up in (and is removed from) the system bundle.
|
||||||
|
#
|
||||||
|
# This is a contract test, not an integration test: it doesn't drive the Rust
|
||||||
|
# code (that would need systemd-in-container). It verifies the assumptions in
|
||||||
|
# `LINUX_TRUST_STORES` against the real distro behavior. If you change that
|
||||||
|
# table in src/system_dns.rs, update the per-distro cases below to match.
|
||||||
|
#
|
||||||
|
# Requirements: docker, openssl (host).
|
||||||
|
# Usage: ./tests/docker/install-trust.sh
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/../.."
|
||||||
|
|
||||||
|
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||||
|
|
||||||
|
# Self-signed CA fixture, mounted into each container as ca.pem.
|
||||||
|
# basicConstraints=CA:TRUE is required — without it, Debian's
|
||||||
|
# update-ca-certificates silently skips the cert during bundle build.
|
||||||
|
FIXTURE_DIR=$(mktemp -d)
|
||||||
|
trap 'rm -rf "$FIXTURE_DIR"' EXIT
|
||||||
|
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||||
|
-keyout "$FIXTURE_DIR/ca.key" \
|
||||||
|
-out "$FIXTURE_DIR/ca.pem" \
|
||||||
|
-subj "/CN=Numa Local CA Test $(date +%s)" \
|
||||||
|
-addext "basicConstraints=critical,CA:TRUE" \
|
||||||
|
-addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
|
||||||
|
|
||||||
|
# Distro bundles store certs differently — Debian writes raw PEM only,
|
||||||
|
# Fedora prepends "# CN" comment headers, Arch via extract-compat is
|
||||||
|
# raw PEM. To detect cert presence uniformly we grep for a deterministic
|
||||||
|
# substring of the base64 body (first base64 line is unique per cert).
|
||||||
|
CERT_TAG=$(sed -n '2p' "$FIXTURE_DIR/ca.pem")
|
||||||
|
|
||||||
|
PASSED=0; FAILED=0
|
||||||
|
|
||||||
|
run_case() {
|
||||||
|
local distro="$1"; shift
|
||||||
|
local image="$1"; shift
|
||||||
|
local platform="$1"; shift
|
||||||
|
local script="$1"
|
||||||
|
|
||||||
|
printf "── %s (%s) ──\n" "$distro" "$image"
|
||||||
|
if docker run --rm \
|
||||||
|
--platform "$platform" \
|
||||||
|
--security-opt seccomp=unconfined \
|
||||||
|
-e CERT_TAG="$CERT_TAG" \
|
||||||
|
-e DEBIAN_FRONTEND=noninteractive \
|
||||||
|
-v "$FIXTURE_DIR/ca.pem:/fixture/ca.pem:ro" \
|
||||||
|
"$image" bash -c "$script"; then
|
||||||
|
printf "${GREEN}✓${RESET} %s\n\n" "$distro"
|
||||||
|
PASSED=$((PASSED + 1))
|
||||||
|
else
|
||||||
|
printf "${RED}✗${RESET} %s\n\n" "$distro"
|
||||||
|
FAILED=$((FAILED + 1))
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Debian / Ubuntu / Mint — anchor: /usr/local/share/ca-certificates/*.crt
|
||||||
|
run_case "debian" "debian:stable" "linux/amd64" '
|
||||||
|
set -e
|
||||||
|
apt-get update -qq
|
||||||
|
apt-get install -qq -y ca-certificates >/dev/null
|
||||||
|
install -m 0644 /fixture/ca.pem /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||||
|
update-ca-certificates >/dev/null 2>&1
|
||||||
|
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||||
|
echo " install: cert present in bundle"
|
||||||
|
rm /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||||
|
update-ca-certificates --fresh >/dev/null 2>&1
|
||||||
|
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||||
|
echo " uninstall: cert STILL present (regression)" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo " uninstall: cert removed from bundle"
|
||||||
|
'
|
||||||
|
|
||||||
|
# Fedora / RHEL / CentOS / SUSE — anchor: /etc/pki/ca-trust/source/anchors/*.pem
|
||||||
|
run_case "fedora" "fedora:latest" "linux/amd64" '
|
||||||
|
set -e
|
||||||
|
dnf install -q -y ca-certificates >/dev/null
|
||||||
|
install -m 0644 /fixture/ca.pem /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||||
|
update-ca-trust extract
|
||||||
|
grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
||||||
|
echo " install: cert present in bundle"
|
||||||
|
rm /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||||
|
update-ca-trust extract
|
||||||
|
if grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem; then
|
||||||
|
echo " uninstall: cert STILL present (regression)" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo " uninstall: cert removed from bundle"
|
||||||
|
'
|
||||||
|
|
||||||
|
# Arch / Manjaro — anchor: /etc/ca-certificates/trust-source/anchors/*.pem
|
||||||
|
# archlinux:latest is x86_64-only; --platform forces emulation on Apple Silicon.
|
||||||
|
run_case "arch" "archlinux:latest" "linux/amd64" '
|
||||||
|
set -e
|
||||||
|
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu emulation.
|
||||||
|
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||||
|
pacman -Sy --noconfirm --needed ca-certificates p11-kit >/dev/null 2>&1
|
||||||
|
install -m 0644 /fixture/ca.pem /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||||
|
trust extract-compat
|
||||||
|
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||||
|
echo " install: cert present in bundle"
|
||||||
|
rm /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||||
|
trust extract-compat
|
||||||
|
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||||
|
echo " uninstall: cert STILL present (regression)" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo " uninstall: cert removed from bundle"
|
||||||
|
'
|
||||||
|
|
||||||
|
printf "── summary ──\n"
|
||||||
|
printf " ${GREEN}passed${RESET}: %d\n" "$PASSED"
|
||||||
|
printf " ${RED}failed${RESET}: %d\n" "$FAILED"
|
||||||
|
[ "$FAILED" -eq 0 ]
|
||||||
147
tests/docker/smoke-arch.sh
Executable file
147
tests/docker/smoke-arch.sh
Executable file
@@ -0,0 +1,147 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
#
|
||||||
|
# Arch Linux compatibility smoke test.
|
||||||
|
#
|
||||||
|
# Builds numa from source inside an archlinux:latest container, runs it
|
||||||
|
# in forward mode on port 5354, and verifies a single DNS query returns
|
||||||
|
# an A record. Validates the "Arch compatible" claim end-to-end before
|
||||||
|
# release announcements.
|
||||||
|
#
|
||||||
|
# Dogfooding: the test numa forwards to the host's running numa via
|
||||||
|
# host.docker.internal (Docker Desktop's host gateway). This avoids the
|
||||||
|
# Docker NAT/UDP issues with public resolvers and exercises the realistic
|
||||||
|
# numa-on-numa shape. Requires the host to be running numa on port 53.
|
||||||
|
#
|
||||||
|
# First run is slow (~8-12 min): image pull + pacman + cold cargo build.
|
||||||
|
# No caching across runs.
|
||||||
|
#
|
||||||
|
# Requirements: docker, host running numa on 0.0.0.0:53
|
||||||
|
# Usage: ./tests/docker/smoke-arch.sh
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/../.."
|
||||||
|
|
||||||
|
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||||
|
|
||||||
|
# Precondition: the test numa-on-arch forwards to the host numa as its
|
||||||
|
# upstream (dogfood pattern). Fail fast with a clear error if there is
|
||||||
|
# no working DNS on the host, rather than letting the dig inside the
|
||||||
|
# container time out with "deadline has elapsed".
|
||||||
|
if ! dig @127.0.0.1 google.com A +short +time=1 +tries=1 >/dev/null 2>&1; then
|
||||||
|
printf "${RED}error:${RESET} host numa is not answering on 127.0.0.1:53\n" >&2
|
||||||
|
echo " This test forwards to the host numa via host.docker.internal." >&2
|
||||||
|
echo " Start numa on the host first (sudo numa install), then rerun." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "── building + running numa on archlinux:latest ──"
|
||||||
|
echo " (first run is slow: image pull + pacman + cold cargo build, ~8-12 min)"
|
||||||
|
echo
|
||||||
|
|
||||||
|
docker run --rm \
|
||||||
|
--platform linux/amd64 \
|
||||||
|
--security-opt seccomp=unconfined \
|
||||||
|
-v "$PWD:/src:ro" \
|
||||||
|
-v numa-arch-cargo:/root/.cargo \
|
||||||
|
-v numa-arch-target:/work/target \
|
||||||
|
archlinux:latest bash -c '
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu
|
||||||
|
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||||
|
|
||||||
|
echo "── pacman: installing build + runtime deps ──"
|
||||||
|
pacman -Sy --noconfirm --needed rust gcc pkgconf cmake make perl bind 2>&1 | tail -3
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Copy source to a writable workdir, skipping target/ + .git so we
|
||||||
|
# do not pull in the host (macOS) build artifacts.
|
||||||
|
mkdir -p /work
|
||||||
|
tar -C /src --exclude=./target --exclude=./.git -cf - . | tar -C /work -xf -
|
||||||
|
cd /work
|
||||||
|
|
||||||
|
echo "── cargo build --release --locked ──"
|
||||||
|
cargo build --release --locked 2>&1 | tail -5
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Dogfood: forward to the host numa via host.docker.internal.
|
||||||
|
# numa parses upstream.address as a literal SocketAddr, so we resolve
|
||||||
|
# the hostname to an IPv4 address first (force v4 — getent hosts may
|
||||||
|
# return IPv6 first, and IPv6 addresses need bracketed addr:port form).
|
||||||
|
HOST_IP=$(getent ahostsv4 host.docker.internal | awk "/STREAM/ {print \$1; exit}")
|
||||||
|
if [ -z "$HOST_IP" ]; then
|
||||||
|
echo " ✗ could not resolve host.docker.internal to IPv4 (not on Docker Desktop?)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "── starting numa on :5354 (forward to host numa at $HOST_IP:53) ──"
|
||||||
|
# Intentionally NOT setting [server] data_dir — we want to exercise the
|
||||||
|
# default code path (data_dir() → daemon_data_dir() → /var/lib/numa) so
|
||||||
|
# the FHS-path assertion below verifies the live wiring, not just the
|
||||||
|
# unit-tested helper.
|
||||||
|
cat > /tmp/numa.toml <<EOF
|
||||||
|
[server]
|
||||||
|
bind_addr = "127.0.0.1:5354"
|
||||||
|
api_port = 5381
|
||||||
|
|
||||||
|
[upstream]
|
||||||
|
mode = "forward"
|
||||||
|
address = "$HOST_IP"
|
||||||
|
port = 53
|
||||||
|
EOF
|
||||||
|
|
||||||
|
./target/release/numa /tmp/numa.toml > /tmp/numa.log 2>&1 &
|
||||||
|
NUMA_PID=$!
|
||||||
|
|
||||||
|
# Poll for readiness — numa is ready when it answers a query
|
||||||
|
READY=0
|
||||||
|
for i in 1 2 3 4 5 6 7 8; do
|
||||||
|
sleep 1
|
||||||
|
if dig @127.0.0.1 -p 5354 google.com A +short +time=1 +tries=1 2>/dev/null \
|
||||||
|
| grep -qE "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$"; then
|
||||||
|
READY=1
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "$READY" -ne 1 ]; then
|
||||||
|
echo " ✗ numa did not return an A record after 8s"
|
||||||
|
echo " numa log:"
|
||||||
|
cat /tmp/numa.log
|
||||||
|
kill $NUMA_PID 2>/dev/null || true
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "── dig @127.0.0.1 -p 5354 google.com A ──"
|
||||||
|
ANSWER=$(dig @127.0.0.1 -p 5354 google.com A +short +time=2 +tries=1)
|
||||||
|
echo "$ANSWER" | sed "s/^/ /"
|
||||||
|
|
||||||
|
kill $NUMA_PID 2>/dev/null || true
|
||||||
|
|
||||||
|
# FHS path assertion: the default data dir on Linux must be /var/lib/numa
|
||||||
|
# (not the legacy /usr/local/var/numa). The CA cert generated at startup
|
||||||
|
# is the canonical proof that numa wrote to the right place.
|
||||||
|
echo
|
||||||
|
echo "── FHS path check ──"
|
||||||
|
if [ -f /var/lib/numa/ca.pem ]; then
|
||||||
|
echo " ✓ CA cert at /var/lib/numa/ca.pem (FHS path)"
|
||||||
|
else
|
||||||
|
echo " ✗ CA cert NOT at /var/lib/numa/ca.pem"
|
||||||
|
echo " ls /var/lib/numa/:"
|
||||||
|
ls -la /var/lib/numa/ 2>&1 | sed "s/^/ /"
|
||||||
|
echo " ls /usr/local/var/numa/:"
|
||||||
|
ls -la /usr/local/var/numa/ 2>&1 | sed "s/^/ /"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -e /usr/local/var/numa ]; then
|
||||||
|
echo " ✗ legacy path /usr/local/var/numa unexpectedly exists on a fresh container"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo " ✓ legacy path /usr/local/var/numa absent (fresh install used FHS)"
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo " ✓ numa built, ran, answered a forward query, and used the FHS data dir on Arch"
|
||||||
|
'
|
||||||
|
|
||||||
|
echo
|
||||||
|
printf "${GREEN}── smoke-arch passed ──${RESET}\n"
|
||||||
138
tests/docker/smoke-port53.sh
Executable file
138
tests/docker/smoke-port53.sh
Executable file
@@ -0,0 +1,138 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
#
|
||||||
|
# Port-53 conflict advisory integration test.
|
||||||
|
#
|
||||||
|
# Builds numa from source inside a debian:bookworm container, pre-binds
|
||||||
|
# port 53 with a UDP socket, then runs numa bare (default bind_addr
|
||||||
|
# 0.0.0.0:53). Verifies:
|
||||||
|
# - process exits with code 1
|
||||||
|
# - stderr contains the advisory ("cannot bind to")
|
||||||
|
# - stderr contains both fix suggestions ("numa install", "bind_addr")
|
||||||
|
#
|
||||||
|
# This is the end-to-end test for the fix in:
|
||||||
|
# src/main.rs — AddrInUse match arm → eprint advisory + process::exit(1)
|
||||||
|
#
|
||||||
|
# No systemd-resolved needed — the conflict is simulated by a Python
|
||||||
|
# UDP socket held open before numa starts.
|
||||||
|
#
|
||||||
|
# Requirements: docker
|
||||||
|
# Usage: ./tests/docker/smoke-port53.sh
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/../.."
|
||||||
|
|
||||||
|
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||||
|
|
||||||
|
pass() { printf " ${GREEN}✓${RESET} %s\n" "$1"; }
|
||||||
|
fail() { printf " ${RED}✗${RESET} %s\n" "$1"; printf " %s\n" "$2"; FAILED=$((FAILED+1)); }
|
||||||
|
FAILED=0
|
||||||
|
|
||||||
|
echo "── smoke-port53: building + testing numa on debian:bookworm ──"
|
||||||
|
echo " (first run is slow: image pull + cold cargo build, ~5-8 min)"
|
||||||
|
echo
|
||||||
|
|
||||||
|
OUTPUT=$(docker run --rm \
|
||||||
|
--platform linux/amd64 \
|
||||||
|
-v "$PWD:/src:ro" \
|
||||||
|
-v numa-port53-cargo:/root/.cargo \
|
||||||
|
-v numa-port53-target:/work/target \
|
||||||
|
debian:bookworm bash -c '
|
||||||
|
set -e
|
||||||
|
|
||||||
|
apt-get update -qq && apt-get install -y -qq curl build-essential python3 2>&1 | tail -3
|
||||||
|
|
||||||
|
# Install rustup if not already in the cargo cache volume
|
||||||
|
if ! command -v cargo &>/dev/null; then
|
||||||
|
curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --quiet
|
||||||
|
fi
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
|
||||||
|
# Copy source to a writable workdir
|
||||||
|
mkdir -p /work
|
||||||
|
tar -C /src --exclude=./target --exclude=./.git -cf - . | tar -C /work -xf -
|
||||||
|
cd /work
|
||||||
|
|
||||||
|
echo "── cargo build --release --locked ──"
|
||||||
|
cargo build --release --locked 2>&1 | tail -5
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Write the holder script to a file to avoid quoting hell.
|
||||||
|
# Holds port 53 until killed — no sleep race.
|
||||||
|
cat > /tmp/hold53.py << '"'"'PYEOF'"'"'
|
||||||
|
import socket, signal
|
||||||
|
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||||
|
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
|
||||||
|
s.bind(("", 53))
|
||||||
|
signal.pause()
|
||||||
|
PYEOF
|
||||||
|
|
||||||
|
python3 /tmp/hold53.py &
|
||||||
|
HOLDER_PID=$!
|
||||||
|
|
||||||
|
# Verify the holder is actually up before proceeding
|
||||||
|
sleep 0.3
|
||||||
|
if ! kill -0 $HOLDER_PID 2>/dev/null; then
|
||||||
|
echo "holder_failed=1"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "── running numa with port 53 already bound ──"
|
||||||
|
# timeout 5: guards against numa not exiting (advisory not fired, bug present)
|
||||||
|
# Capture stderr to a file so the exit code is not clobbered by || or $()
|
||||||
|
set +e
|
||||||
|
timeout 5 ./target/release/numa > /tmp/numa-stderr.txt 2>&1
|
||||||
|
EXIT_CODE=$?
|
||||||
|
set -e
|
||||||
|
STDERR=$(cat /tmp/numa-stderr.txt)
|
||||||
|
|
||||||
|
kill $HOLDER_PID 2>/dev/null || true
|
||||||
|
|
||||||
|
echo "exit_code=$EXIT_CODE"
|
||||||
|
printf "%s" "$STDERR" | sed "s/^/ numa: /"
|
||||||
|
' 2>&1)
|
||||||
|
|
||||||
|
echo "$OUTPUT"
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo "── assertions ──"
|
||||||
|
|
||||||
|
if echo "$OUTPUT" | grep -q "holder_failed=1"; then
|
||||||
|
echo " SETUP FAILED: could not pre-bind port 53 inside container"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
EXIT_CODE=$(echo "$OUTPUT" | grep '^exit_code=' | cut -d= -f2)
|
||||||
|
|
||||||
|
if [ "${EXIT_CODE:-}" = "1" ]; then
|
||||||
|
pass "exits with code 1"
|
||||||
|
else
|
||||||
|
fail "exits with code 1" "got: exit_code=${EXIT_CODE:-<missing>}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$OUTPUT" | grep -q "cannot bind to"; then
|
||||||
|
pass "advisory printed to stderr"
|
||||||
|
else
|
||||||
|
fail "advisory printed to stderr" "stderr did not contain 'cannot bind to'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$OUTPUT" | grep -q "numa install"; then
|
||||||
|
pass "advisory offers 'sudo numa install'"
|
||||||
|
else
|
||||||
|
fail "advisory offers 'sudo numa install'" "not found in output"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$OUTPUT" | grep -q "bind_addr"; then
|
||||||
|
pass "advisory offers non-privileged port alternative"
|
||||||
|
else
|
||||||
|
fail "advisory offers non-privileged port alternative" "'bind_addr' not found in output"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo
|
||||||
|
if [ "$FAILED" -eq 0 ]; then
|
||||||
|
printf "${GREEN}── smoke-port53 passed ──${RESET}\n"
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
printf "${RED}── smoke-port53 failed ($FAILED assertion(s)) ──${RESET}\n"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
@@ -404,6 +404,241 @@ check "Cache flushed" \
|
|||||||
|
|
||||||
kill "$NUMA_PID" 2>/dev/null || true
|
kill "$NUMA_PID" 2>/dev/null || true
|
||||||
wait "$NUMA_PID" 2>/dev/null || true
|
wait "$NUMA_PID" 2>/dev/null || true
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
# ---- Suite 5: DNS-over-TLS (RFC 7858) ----
|
||||||
|
echo ""
|
||||||
|
echo "╔══════════════════════════════════════════╗"
|
||||||
|
echo "║ Suite 5: DNS-over-TLS (RFC 7858) ║"
|
||||||
|
echo "╚══════════════════════════════════════════╝"
|
||||||
|
|
||||||
|
if ! command -v kdig >/dev/null 2>&1; then
|
||||||
|
printf " ${DIM}skipped — install 'knot' for kdig${RESET}\n"
|
||||||
|
elif ! command -v openssl >/dev/null 2>&1; then
|
||||||
|
printf " ${DIM}skipped — openssl not found${RESET}\n"
|
||||||
|
else
|
||||||
|
DOT_PORT=8853
|
||||||
|
DOT_CERT=/tmp/numa-integration-dot.crt
|
||||||
|
DOT_KEY=/tmp/numa-integration-dot.key
|
||||||
|
|
||||||
|
# Generate a test cert mirroring production self_signed_tls SAN shape
|
||||||
|
# (*.numa wildcard + explicit numa.numa apex).
|
||||||
|
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||||
|
-keyout "$DOT_KEY" -out "$DOT_CERT" \
|
||||||
|
-subj "/CN=Numa .numa services" \
|
||||||
|
-addext "subjectAltName=DNS:*.numa,DNS:numa.numa" \
|
||||||
|
>/dev/null 2>&1
|
||||||
|
|
||||||
|
# Suite 5 uses a local zone so it's upstream-independent — the point is
|
||||||
|
# to exercise the DoT transport layer (handshake, ALPN, framing,
|
||||||
|
# persistent connections), not re-test recursive resolution.
|
||||||
|
cat > "$CONFIG" << CONF
|
||||||
|
[server]
|
||||||
|
bind_addr = "127.0.0.1:$PORT"
|
||||||
|
api_port = $API_PORT
|
||||||
|
|
||||||
|
[upstream]
|
||||||
|
mode = "forward"
|
||||||
|
address = "127.0.0.1"
|
||||||
|
port = 65535
|
||||||
|
|
||||||
|
[cache]
|
||||||
|
max_entries = 10000
|
||||||
|
|
||||||
|
[blocking]
|
||||||
|
enabled = false
|
||||||
|
|
||||||
|
[proxy]
|
||||||
|
enabled = false
|
||||||
|
|
||||||
|
[dot]
|
||||||
|
enabled = true
|
||||||
|
port = $DOT_PORT
|
||||||
|
bind_addr = "127.0.0.1"
|
||||||
|
cert_path = "$DOT_CERT"
|
||||||
|
key_path = "$DOT_KEY"
|
||||||
|
|
||||||
|
[[zones]]
|
||||||
|
domain = "dot-test.example"
|
||||||
|
record_type = "A"
|
||||||
|
value = "10.0.0.1"
|
||||||
|
ttl = 60
|
||||||
|
CONF
|
||||||
|
|
||||||
|
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||||
|
NUMA_PID=$!
|
||||||
|
sleep 4
|
||||||
|
|
||||||
|
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||||
|
FAILED=$((FAILED + 1))
|
||||||
|
printf " ${RED}✗${RESET} DoT startup\n"
|
||||||
|
printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
echo "=== Listener ==="
|
||||||
|
|
||||||
|
check "DoT bound on 127.0.0.1:$DOT_PORT" \
|
||||||
|
"DoT listening on 127.0.0.1:$DOT_PORT" \
|
||||||
|
"$(grep 'DoT listening' "$LOG")"
|
||||||
|
|
||||||
|
KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$DOT_CERT +tls-hostname=numa.numa +time=5 +retry=0"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Queries over DoT ==="
|
||||||
|
|
||||||
|
check "DoT local zone A record" \
|
||||||
|
"10.0.0.1" \
|
||||||
|
"$($KDIG +short dot-test.example A 2>/dev/null)"
|
||||||
|
|
||||||
|
# +keepopen reuses one TLS connection for multiple queries — tests
|
||||||
|
# persistent connection handling. kdig applies options left-to-right,
|
||||||
|
# so +short and +keepopen must come before the query specs.
|
||||||
|
check "DoT persistent connection (3 queries, 1 handshake)" \
|
||||||
|
"10.0.0.1" \
|
||||||
|
"$($KDIG +keepopen +short dot-test.example A dot-test.example A dot-test.example A 2>/dev/null | head -1)"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== ALPN ==="
|
||||||
|
|
||||||
|
# Positive case: client offers "dot", server picks it.
|
||||||
|
ALPN_OK=$(echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||||
|
-servername numa.numa -alpn dot -CAfile "$DOT_CERT" 2>&1 </dev/null || true)
|
||||||
|
check "DoT negotiates ALPN \"dot\"" \
|
||||||
|
"ALPN protocol: dot" \
|
||||||
|
"$ALPN_OK"
|
||||||
|
|
||||||
|
# Negative case: client offers only "h2", server must reject the
|
||||||
|
# handshake with no_application_protocol alert (cross-protocol
|
||||||
|
# confusion defense, RFC 7858bis §3.2).
|
||||||
|
if echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||||
|
-servername numa.numa -alpn h2 -CAfile "$DOT_CERT" \
|
||||||
|
</dev/null >/dev/null 2>&1; then
|
||||||
|
ALPN_MISMATCH="handshake unexpectedly succeeded"
|
||||||
|
else
|
||||||
|
ALPN_MISMATCH="rejected"
|
||||||
|
fi
|
||||||
|
check "DoT rejects non-dot ALPN" \
|
||||||
|
"rejected" \
|
||||||
|
"$ALPN_MISMATCH"
|
||||||
|
fi
|
||||||
|
|
||||||
|
kill "$NUMA_PID" 2>/dev/null || true
|
||||||
|
wait "$NUMA_PID" 2>/dev/null || true
|
||||||
|
rm -f "$DOT_CERT" "$DOT_KEY"
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
# ---- Suite 6: Proxy + DoT coexistence ----
|
||||||
|
echo ""
|
||||||
|
echo "╔══════════════════════════════════════════╗"
|
||||||
|
echo "║ Suite 6: Proxy + DoT Coexistence ║"
|
||||||
|
echo "╚══════════════════════════════════════════╝"
|
||||||
|
|
||||||
|
if ! command -v kdig >/dev/null 2>&1 || ! command -v openssl >/dev/null 2>&1; then
|
||||||
|
printf " ${DIM}skipped — needs kdig + openssl${RESET}\n"
|
||||||
|
else
|
||||||
|
DOT_PORT=8853
|
||||||
|
PROXY_HTTP_PORT=8080
|
||||||
|
PROXY_HTTPS_PORT=8443
|
||||||
|
NUMA_DATA=/tmp/numa-integration-data
|
||||||
|
|
||||||
|
# Fresh data dir so we generate a fresh CA for this suite. Path is set
|
||||||
|
# via [server] data_dir in the TOML below, not an env var — numa treats
|
||||||
|
# its config file as the single source of truth for all knobs.
|
||||||
|
rm -rf "$NUMA_DATA"
|
||||||
|
mkdir -p "$NUMA_DATA"
|
||||||
|
|
||||||
|
cat > "$CONFIG" << CONF
|
||||||
|
[server]
|
||||||
|
bind_addr = "127.0.0.1:$PORT"
|
||||||
|
api_port = $API_PORT
|
||||||
|
data_dir = "$NUMA_DATA"
|
||||||
|
|
||||||
|
[upstream]
|
||||||
|
mode = "forward"
|
||||||
|
address = "127.0.0.1"
|
||||||
|
port = 65535
|
||||||
|
|
||||||
|
[cache]
|
||||||
|
max_entries = 10000
|
||||||
|
|
||||||
|
[blocking]
|
||||||
|
enabled = false
|
||||||
|
|
||||||
|
[proxy]
|
||||||
|
enabled = true
|
||||||
|
port = $PROXY_HTTP_PORT
|
||||||
|
tls_port = $PROXY_HTTPS_PORT
|
||||||
|
tld = "numa"
|
||||||
|
bind_addr = "127.0.0.1"
|
||||||
|
|
||||||
|
[dot]
|
||||||
|
enabled = true
|
||||||
|
port = $DOT_PORT
|
||||||
|
bind_addr = "127.0.0.1"
|
||||||
|
|
||||||
|
[[zones]]
|
||||||
|
domain = "dot-test.example"
|
||||||
|
record_type = "A"
|
||||||
|
value = "10.0.0.1"
|
||||||
|
ttl = 60
|
||||||
|
CONF
|
||||||
|
|
||||||
|
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||||
|
NUMA_PID=$!
|
||||||
|
sleep 4
|
||||||
|
|
||||||
|
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||||
|
FAILED=$((FAILED + 1))
|
||||||
|
printf " ${RED}✗${RESET} Startup with proxy + DoT\n"
|
||||||
|
printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
echo "=== Both listeners ==="
|
||||||
|
|
||||||
|
check "DoT listener bound" \
|
||||||
|
"DoT listening on 127.0.0.1:$DOT_PORT" \
|
||||||
|
"$(grep 'DoT listening' "$LOG")"
|
||||||
|
|
||||||
|
check "HTTPS proxy listener bound" \
|
||||||
|
"HTTPS proxy listening on 127.0.0.1:$PROXY_HTTPS_PORT" \
|
||||||
|
"$(grep 'HTTPS proxy listening' "$LOG")"
|
||||||
|
|
||||||
|
PANIC_COUNT=$(grep -c 'panicked' "$LOG" 2>/dev/null || echo 0)
|
||||||
|
check "No startup panics in log" \
|
||||||
|
"^0$" \
|
||||||
|
"$PANIC_COUNT"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== DoT works with proxy enabled ==="
|
||||||
|
|
||||||
|
# Proxy's build_tls_config runs first and creates the CA in
|
||||||
|
# $NUMA_DATA_DIR. DoT self_signed_tls then loads the same CA and
|
||||||
|
# issues its own leaf cert. One CA trusts both listeners.
|
||||||
|
CA="$NUMA_DATA/ca.pem"
|
||||||
|
KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$CA +tls-hostname=numa.numa +time=5 +retry=0"
|
||||||
|
|
||||||
|
check "DoT local zone A (with proxy on)" \
|
||||||
|
"10.0.0.1" \
|
||||||
|
"$($KDIG +short dot-test.example A 2>/dev/null)"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Proxy TLS works with DoT enabled ==="
|
||||||
|
|
||||||
|
# Proxy cert has SAN numa.numa (auto-added "numa" service). A
|
||||||
|
# successful handshake validates that the proxy's separate
|
||||||
|
# ServerConfig wasn't disturbed by DoT's own cert generation.
|
||||||
|
PROXY_TLS=$(echo "" | openssl s_client -connect "127.0.0.1:$PROXY_HTTPS_PORT" \
|
||||||
|
-servername numa.numa -CAfile "$CA" 2>&1 </dev/null || true)
|
||||||
|
check "Proxy HTTPS TLS handshake succeeds" \
|
||||||
|
"Verify return code: 0 (ok)" \
|
||||||
|
"$PROXY_TLS"
|
||||||
|
fi
|
||||||
|
|
||||||
|
kill "$NUMA_PID" 2>/dev/null || true
|
||||||
|
wait "$NUMA_PID" 2>/dev/null || true
|
||||||
|
rm -rf "$NUMA_DATA"
|
||||||
|
fi
|
||||||
|
|
||||||
# Summary
|
# Summary
|
||||||
echo ""
|
echo ""
|
||||||
|
|||||||
94
tests/manual/install-trust-macos.sh
Executable file
94
tests/manual/install-trust-macos.sh
Executable file
@@ -0,0 +1,94 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
#
|
||||||
|
# Manual macOS CA trust contract test.
|
||||||
|
#
|
||||||
|
# Mirrors src/system_dns.rs::trust_ca_macos / untrust_ca_macos by running
|
||||||
|
# the same `security` shell commands against a fixture cert with a unique
|
||||||
|
# CN. Safe to run alongside a production numa install:
|
||||||
|
#
|
||||||
|
# - Test cert CN = "Numa Local CA Test <pid-ts>", always strictly longer
|
||||||
|
# than the production CN "Numa Local CA". `security find-certificate -c`
|
||||||
|
# does substring matching, so the test's search for $TEST_CN can never
|
||||||
|
# match the production cert (the search term is longer than the prod CN).
|
||||||
|
# - All deletes use `delete-certificate -Z <hash>`, which only touches the
|
||||||
|
# cert with that exact hash. Production and test certs have different
|
||||||
|
# hashes by construction (different key material), so the delete cannot
|
||||||
|
# reach the production cert even if a CN search somehow returned both.
|
||||||
|
#
|
||||||
|
# Mutates the System keychain (briefly). Cleans up on success or interrupt.
|
||||||
|
# Requires sudo for `security add-trusted-cert` and `delete-certificate`.
|
||||||
|
#
|
||||||
|
# Usage: ./tests/manual/install-trust-macos.sh
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
if [[ "$OSTYPE" != darwin* ]]; then
|
||||||
|
echo "This test is macOS-only." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||||
|
|
||||||
|
# Production constant from src/tls.rs::CA_COMMON_NAME — keep in sync.
|
||||||
|
PROD_CN="Numa Local CA"
|
||||||
|
KEYCHAIN="/Library/Keychains/System.keychain"
|
||||||
|
|
||||||
|
# Notice if production numa is already installed. We proceed regardless —
|
||||||
|
# see header for why coexistence is safe (unique CN + by-hash deletion).
|
||||||
|
if security find-certificate -c "$PROD_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||||
|
echo " note: production '$PROD_CN' detected — proceeding alongside (test cert can't touch it)"
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Unique CN ensures the test cert can never collide with production.
|
||||||
|
TEST_CN="Numa Local CA Test $$-$(date +%s)"
|
||||||
|
FIXTURE_DIR=$(mktemp -d)
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
# Best-effort: remove any test certs by hash if still present.
|
||||||
|
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||||
|
echo " cleanup: removing leftover test cert"
|
||||||
|
security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
|
||||||
|
| awk '/^SHA-1 hash:/ {print $NF}' \
|
||||||
|
| while read -r hash; do
|
||||||
|
sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null 2>&1 || true
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
rm -rf "$FIXTURE_DIR"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
echo "── generating fixture CA ──"
|
||||||
|
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||||
|
-keyout "$FIXTURE_DIR/ca.key" \
|
||||||
|
-out "$FIXTURE_DIR/ca.pem" \
|
||||||
|
-subj "/CN=$TEST_CN" \
|
||||||
|
-addext "basicConstraints=critical,CA:TRUE" \
|
||||||
|
-addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
|
||||||
|
echo " CN: $TEST_CN"
|
||||||
|
echo
|
||||||
|
|
||||||
|
echo "── trust step (mirrors trust_ca_macos) ──"
|
||||||
|
sudo security add-trusted-cert -d -r trustRoot -k "$KEYCHAIN" "$FIXTURE_DIR/ca.pem"
|
||||||
|
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||||
|
printf " ${GREEN}✓${RESET} test cert found in keychain\n"
|
||||||
|
else
|
||||||
|
printf " ${RED}✗${RESET} test cert NOT found after add-trusted-cert\n"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo
|
||||||
|
|
||||||
|
echo "── untrust step (mirrors untrust_ca_macos) ──"
|
||||||
|
security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
|
||||||
|
| awk '/^SHA-1 hash:/ {print $NF}' \
|
||||||
|
| while read -r hash; do
|
||||||
|
sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null
|
||||||
|
done
|
||||||
|
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
|
||||||
|
printf " ${RED}✗${RESET} test cert STILL present after delete (regression)\n"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
printf " ${GREEN}✓${RESET} test cert removed from keychain\n"
|
||||||
|
echo
|
||||||
|
|
||||||
|
printf "${GREEN}all checks passed${RESET}\n"
|
||||||
Reference in New Issue
Block a user